Dataset schema — each row carries five fields:

- code: string, 82 to 53.2k characters
- code_codestyle: int64, range 0 to 721
- style_context: string, 91 to 41.9k characters
- style_context_codestyle: int64, range 0 to 699
- label: int64, 0 or 1 (in the rows shown here, label 1 coincides with code_codestyle == style_context_codestyle)
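A minimal sketch for loading and inspecting rows with the `datasets` library; the dataset identifier below is a placeholder, since the dump does not name the repository:

```python
# Hypothetical loading sketch -- "user/code-style-pairs" is a placeholder id,
# not the real dataset repository name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample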
Row 1

code:

```python
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # without a timeout the mocked request would hang forever
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
```
code_codestyle: 315
style_context:

```python
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True in inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on a COCO fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
```
style_context_codestyle: 411
label: 0
Row 2

code:

```python
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```
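The `_LazyModule` indirection above means submodules are only imported on first attribute access; a rough illustration of the effect, assuming `transformers` is installed:

```python
# Importing the package does not pull in the heavy modeling code yet...
import transformers

# ...the torch-backed module is only loaded when the attribute is first touched.
model_cls = transformers.DistilBertModel
```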
code_codestyle: 648
style_context:

```python
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a COCO fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
```
style_context_codestyle: 648
label: 1
Row 3

code:

```python
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root is only bracketed if the function
    # changes sign between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
```
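For reference, 10 − x² vanishes at ±√10 ≈ ±3.1623, and both example calls bracket the positive root, so a quick sanity check on the module above is:

```python
# The loop exits once the bracket is narrower than 0.01, so the returned
# midpoint is within 0.01 of the true root at +sqrt(10).
assert abs(bisection(-2, 5) - 10 ** 0.5) < 0.01
assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01
```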
code_codestyle: 37
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING UpperCAmelCase =logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE ) class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> str: super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ ) self.check_model_type(lowerCamelCase_ ) def UpperCamelCase__ ( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> List[Any]: A , A = {}, {} if padding is not None: A = padding if truncation is not None: A = truncation if top_k is not None: A = top_k return preprocess_params, {}, postprocess_params def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ) -> Dict: if isinstance(lowerCamelCase_ ,(Image.Image, str) ) and isinstance(lowerCamelCase_ ,lowerCamelCase_ ): A = {"""image""": image, """question""": question} else: A = image A = super().__call__(lowerCamelCase_ ,**lowerCamelCase_ ) return results def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=False ,lowerCamelCase_=False ) -> List[Any]: A = load_image(inputs["""image"""] ) A = self.tokenizer( inputs["""question"""] ,return_tensors=self.framework ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ) A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework ) model_inputs.update(lowerCamelCase_ ) return model_inputs def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]: A = self.model(**lowerCamelCase_ ) return model_outputs def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=5 ) -> int: if top_k > self.model.config.num_labels: A = self.model.config.num_labels if self.framework == "pt": A = model_outputs.logits.sigmoid()[0] A , A = probs.topk(lowerCamelCase_ ) else: raise ValueError(f'Unsupported framework: {self.framework}' ) A = scores.tolist() A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase_ ,lowerCamelCase_ )]
style_context_codestyle: 617
label: 0
Row 4

code:

```python
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling, so that entangled
    # lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
```
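A quick worked example of the distribution logic above: 10 shards over 3 jobs puts the remainder in the earliest groups.

```python
# 10 // 3 == 3 with remainder 1, so group 0 gets 4 shards and the rest get 3:
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]
```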
code_codestyle: 672
style_context:

```python
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
```
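The script expects a `GITHUB_TOKEN` environment variable with permission to edit issues on the target repository; in the transformers repository a script like this is typically wired to a scheduled GitHub Actions workflow rather than run by hand.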
style_context_codestyle: 672
label: 1
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class a : def __init__( self , _snake_case , _snake_case=99 , _snake_case=13 , _snake_case=7 , _snake_case=9 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case=8 , _snake_case=0.1 , _snake_case=0.002 , _snake_case=1 , _snake_case=0 , _snake_case=0 , _snake_case=None , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = encoder_seq_length lowerCAmelCase = decoder_seq_length # For common tests lowerCAmelCase = self.decoder_seq_length lowerCAmelCase = is_training lowerCAmelCase = use_attention_mask lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = d_ff lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = dropout_rate lowerCAmelCase = initializer_factor lowerCAmelCase = eos_token_id lowerCAmelCase = pad_token_id lowerCAmelCase = decoder_start_token_id lowerCAmelCase = None lowerCAmelCase = decoder_layers def UpperCamelCase__ ( self ): """simple docstring""" return TaConfig.from_pretrained('google/umt5-base' ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ): """simple docstring""" if attention_mask is None: lowerCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_snake_case ) if decoder_head_mask is None: lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_snake_case ) if cross_attn_head_mask is None: lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_snake_case ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCAmelCase = 
input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = self.get_config() lowerCAmelCase = config.num_attention_heads lowerCAmelCase = self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case ) return config, input_dict def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCamelCase__ ( self ): """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = UMTaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , ) lowerCAmelCase = model(input_ids=_snake_case , decoder_input_ids=_snake_case ) lowerCAmelCase = result.last_hidden_state lowerCAmelCase = result.past_key_values lowerCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_snake_case ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval() # first forward pass lowerCAmelCase = model(_snake_case , use_cache=_snake_case ) lowerCAmelCase = model(_snake_case ) lowerCAmelCase = model(_snake_case , use_cache=_snake_case ) self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) ) self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 ) lowerCAmelCase ,lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and lowerCAmelCase = 
torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = model(_snake_case )['last_hidden_state'] lowerCAmelCase = model(_snake_case , past_key_values=_snake_case )['last_hidden_state'] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval() lowerCAmelCase = model(**_snake_case )['last_hidden_state'] self.parent.assertFalse(torch.isnan(_snake_case ).any().item() ) @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) snake_case__ = (UMTaForConditionalGeneration,) if is_torch_available() else () snake_case__ = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) snake_case__ = True snake_case__ = False snake_case__ = False snake_case__ = True snake_case__ = True # The small UMT5 model needs higher percentages for CPU/MP tests snake_case__ = [0.8, 0.9] def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(_snake_case ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=_snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] lowerCAmelCase = self.model_tester.prepare_config_and_inputs() lowerCAmelCase = config_and_inputs[0] lowerCAmelCase = UMTaForConditionalGeneration(_snake_case ).eval() model.to(_snake_case ) lowerCAmelCase = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_snake_case ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ), } for attn_name, (name, mask) in zip(_snake_case , head_masking.items() ): lowerCAmelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=_snake_case ) lowerCAmelCase = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 
, max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , ) # We check the state of decoder_attentions and cross_attentions just from the last step lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_snake_case ).to(_snake_case ) lowerCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_snake_case , legacy=_snake_case ) lowerCAmelCase = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] lowerCAmelCase = tokenizer(_snake_case , return_tensors='pt' , padding=_snake_case ).input_ids # fmt: off lowerCAmelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(_snake_case , _snake_case ) lowerCAmelCase = model.generate(input_ids.to(_snake_case ) ) lowerCAmelCase = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] lowerCAmelCase = tokenizer.batch_decode(_snake_case ) self.assertEqual(_snake_case , _snake_case )
code_codestyle: 4
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""", # See all ViT models at https://huggingface.co/models?filter=vit } class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = '''vit''' def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]: '''simple docstring''' super().__init__(**_UpperCAmelCase ) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = qkv_bias UpperCAmelCase_ = encoder_stride class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = version.parse('''1.11''' ) @property def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowercase__ ( self : Union[str, Any] ) -> float: '''simple docstring''' return 1e-4
82
0
'''simple docstring'''
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed=None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name='BB84')

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '0')
    return key


if __name__ == "__main__":
    print(F'The generated key is : {bbaa(8, seed=0)}')
    from doctest import testmod

    testmod()
273
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
273
1
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class __A ( snake_case__ ,unittest.TestCase ): '''simple docstring''' a_ = AlbertTokenizer a_ = AlbertTokenizerFast a_ = True a_ = True a_ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : str = AlbertTokenizer(_snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case ): _lowerCAmelCase : Any = "this is a test" _lowerCAmelCase : Tuple = "this is a test" return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Tuple = "<pad>" _lowerCAmelCase : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "▁eloquent" ) self.assertEqual(len(_snake_case ) , 3_0000 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def SCREAMING_SNAKE_CASE__ ( self ): if not self.test_rust_tokenizer: return _lowerCAmelCase : Tuple = self.get_tokenizer() _lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer() _lowerCAmelCase : List[Any] = "I was born in 92000, and this is falsé." _lowerCAmelCase : Any = tokenizer.tokenize(_snake_case ) _lowerCAmelCase : str = rust_tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase : Any = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) _lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) self.assertListEqual(_snake_case , _snake_case ) _lowerCAmelCase : Dict = self.get_rust_tokenizer() _lowerCAmelCase : List[str] = tokenizer.encode(_snake_case ) _lowerCAmelCase : Tuple = rust_tokenizer.encode(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Any = AlbertTokenizer(_snake_case , keep_accents=_snake_case ) _lowerCAmelCase : Any = tokenizer.tokenize("This is a test" ) self.assertListEqual(_snake_case , ["▁this", "▁is", "▁a", "▁test"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [48, 25, 21, 1289] ) _lowerCAmelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) _lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(_snake_case ) self.assertListEqual(_snake_case , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) _lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case ) self.assertListEqual( _snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Dict = AlbertTokenizer(_snake_case ) _lowerCAmelCase : Union[str, Any] = tokenizer.encode("sequence builders" ) _lowerCAmelCase : Optional[int] = tokenizer.encode("multi-sequence build" ) _lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case ) _lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def SCREAMING_SNAKE_CASE__ ( self ): # fmt: off _lowerCAmelCase : Optional[int] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
424
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
424
1
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCAmelCase : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ): lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = num_stages lowerCAmelCase_ = hidden_sizes lowerCAmelCase_ = depths lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = out_features lowerCAmelCase_ = num_labels lowerCAmelCase_ = scope lowerCAmelCase_ = num_stages def UpperCAmelCase_ ( self ): lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ): return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def UpperCAmelCase_ ( self ): return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowerCAmelCase_ = UperNetForSemanticSegmentation(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase_ = model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) , ) = config_and_inputs lowerCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( __a , __a , unittest.TestCase ): __A : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() 
else () __A : List[Any] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} __A : Tuple = False __A : Union[str, Any] = False __A : List[str] = False __A : Dict = False __A : Union[str, Any] = False __A : Dict = False def UpperCAmelCase_ ( self ): lowerCAmelCase_ = UperNetModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase_ ( self ): return def UpperCAmelCase_ ( self ): lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_lowerCamelCase ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason='''UperNet does not support input and output embeddings''' ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason='''UperNet does not have a base model''' ) def UpperCAmelCase_ ( self ): pass @unittest.skip(reason='''UperNet does not have a base model''' ) def UpperCAmelCase_ ( self ): pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCAmelCase_ ( self ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self ): def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowerCAmelCase_ = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowerCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ 
,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ = _config_zero_init(_lowerCamelCase ) lowerCAmelCase_ = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(config=_lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def UpperCAmelCase_ ( self ): pass @slow def UpperCAmelCase_ ( self ): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case_ ( ) -> Any: lowerCAmelCase_ = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''') lowerCAmelCase_ = Image.open(__snake_case).convert('''RGB''') return image @require_torch @require_vision @slow class __UpperCAmelCase ( unittest.TestCase ): def UpperCAmelCase_ ( self ): lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) lowerCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase ) lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) with torch.no_grad(): lowerCAmelCase_ = model(**_lowerCamelCase ) lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowerCAmelCase_ = torch.tensor( [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) lowerCAmelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase ) lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) with torch.no_grad(): lowerCAmelCase_ = model(**_lowerCamelCase ) lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowerCAmelCase_ = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
606
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
606
1
import string


def decrypt(message: str) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'''Decryption using Key #{key}: {translated}''')


def main() -> None:
    """simple docstring"""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
55
'''simple docstring''' from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image lowerCAmelCase : List[Any] = ["""text""", """image""", """audio"""] def _A ( A ) -> Dict: lowercase : str = [] for input_type in input_types: if input_type == "text": inputs.append("Text input" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(A ,A ): inputs.append(create_inputs(A ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def _A ( A ) -> str: lowercase : Tuple = [] for output in outputs: if isinstance(A ,(str, AgentText) ): output_types.append("text" ) elif isinstance(A ,(Image.Image, AgentImage) ): output_types.append("image" ) elif isinstance(A ,(torch.Tensor, AgentAudio) ): output_types.append("audio" ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class _UpperCamelCase : '''simple docstring''' def a__ ( self ) -> Optional[Any]: self.assertTrue(hasattr(self.tool , "inputs" ) ) self.assertTrue(hasattr(self.tool , "outputs" ) ) lowercase : Optional[Any] = self.tool.inputs for _input in inputs: if isinstance(_input , a_ ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) lowercase : Any = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def a__ ( self ) -> Any: lowercase : Any = create_inputs(self.tool.inputs ) lowercase : Tuple = self.tool(*a_ ) # There is a single output if len(self.tool.outputs ) == 1: lowercase : Any = [outputs] self.assertListEqual(output_types(a_ ) , self.tool.outputs ) def a__ ( self ) -> List[str]: self.assertTrue(hasattr(self.tool , "description" ) ) self.assertTrue(hasattr(self.tool , "default_checkpoint" ) ) self.assertTrue(self.tool.description.startswith("This is a tool that" ) ) def a__ ( self ) -> int: lowercase : str = create_inputs(self.tool.inputs ) lowercase : str = self.tool(*a_ ) if not isinstance(a_ , a_ ): lowercase : Union[str, Any] = [outputs] self.assertEqual(len(a_ ) , len(self.tool.outputs ) ) for output, output_type in zip(a_ , self.tool.outputs ): lowercase : List[str] = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(a_ , a_ ) ) def a__ ( self ) -> Optional[int]: lowercase : int = create_inputs(self.tool.inputs ) lowercase : str = [] for _input, input_type in zip(a_ , self.tool.inputs ): if isinstance(a_ , a_ ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error lowercase : Optional[int] = self.tool(*a_ ) if not isinstance(a_ , a_ ): lowercase : str = [outputs] self.assertEqual(len(a_ ) , len(self.tool.outputs ) )
372
0
"""simple docstring""" import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _A = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$") @total_ordering @dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : str _snake_case : Optional[str] = None _snake_case : Optional[Union[str, int]] = None _snake_case : Optional[Union[str, int]] = None _snake_case : Optional[Union[str, int]] = None def A ( self : Dict )-> Optional[int]: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] )-> int: return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def A ( self : Any )-> List[str]: return self.major, self.minor, self.patch def A ( self : str , A_ : List[str] )-> List[str]: if isinstance(A_ , A_ ): return Version(A_ ) elif isinstance(A_ , A_ ): return other raise TypeError(f"""{other} (type {type(A_ )}) cannot be compared to version.""" ) def __eq__( self : Optional[int] , A_ : List[str] )-> List[Any]: try: __UpperCamelCase = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str , A_ : Dict )-> Tuple: __UpperCamelCase = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Any )-> Optional[int]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def A ( cls : str , A_ : Dict )-> Tuple: __UpperCamelCase = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def A ( self : Optional[Any] )-> str: return self.version_str def lowercase (_snake_case ) -> List[Any]: '''simple docstring''' __UpperCamelCase = _VERSION_REG.match(_snake_case ) if not res: raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(_snake_case ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] ) def lowercase (_snake_case ) -> Optional[Any]: '''simple docstring''' return ".".join(str(_snake_case ) for v in version_tuple )
711
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class __UpperCAmelCase ( snake_case__ ): """simple docstring""" _snake_case : Union[List[PIL.Image.Image], np.ndarray] _snake_case : Optional[List[bool]] _snake_case : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
228
0
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
489
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : List[Any] = precision _SCREAMING_SNAKE_CASE : List[str] = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : List[str] = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : str = 1 _SCREAMING_SNAKE_CASE : List[str] = 13_591_409 _SCREAMING_SNAKE_CASE : str = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
338
0
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) __lowercase = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } __lowercase = { '''b0''': { '''hidden_dim''': 1_280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1_408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1_536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1_792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2_304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2_560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def lowerCAmelCase (__UpperCamelCase : Any ): """simple docstring""" __UpperCamelCase =EfficientNetConfig() __UpperCamelCase =CONFIG_MAP[model_name]['''hidden_dim'''] __UpperCamelCase =CONFIG_MAP[model_name]['''width_coef'''] __UpperCamelCase =CONFIG_MAP[model_name]['''depth_coef'''] __UpperCamelCase =CONFIG_MAP[model_name]['''image_size'''] __UpperCamelCase =CONFIG_MAP[model_name]['''dropout_rate'''] __UpperCamelCase =CONFIG_MAP[model_name]['''dw_padding'''] __UpperCamelCase ='''huggingface/label-files''' __UpperCamelCase ='''imagenet-1k-id2label.json''' __UpperCamelCase =1_0_0_0 __UpperCamelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __UpperCamelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()} __UpperCamelCase =idalabel __UpperCamelCase ={v: k for k, v in idalabel.items()} return config def lowerCAmelCase (): """simple docstring""" __UpperCamelCase ='''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCamelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im def lowerCAmelCase (__UpperCamelCase : str ): """simple docstring""" __UpperCamelCase =CONFIG_MAP[model_name]['''image_size'''] __UpperCamelCase =EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 
0.4_7_4_3_4_1_6_3] , do_center_crop=__UpperCamelCase , ) return preprocessor def lowerCAmelCase (__UpperCamelCase : int ): """simple docstring""" __UpperCamelCase =[v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] __UpperCamelCase =sorted(set(__UpperCamelCase ) ) __UpperCamelCase =len(__UpperCamelCase ) __UpperCamelCase ={b: str(__UpperCamelCase ) for b, i in zip(__UpperCamelCase , range(__UpperCamelCase ) )} __UpperCamelCase =[] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: __UpperCamelCase =block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', 
'''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) __UpperCamelCase ={} for item in rename_keys: if item[0] in original_param_names: __UpperCamelCase ='''efficientnet.''' + item[1] __UpperCamelCase ='''classifier.weight''' __UpperCamelCase ='''classifier.bias''' return key_mapping def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Dict ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue __UpperCamelCase =key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCamelCase =torch.from_numpy(__UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCamelCase =torch.from_numpy(__UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCamelCase =torch.from_numpy(np.transpose(__UpperCamelCase ) ) else: __UpperCamelCase =torch.from_numpy(__UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__UpperCamelCase ) @torch.no_grad() def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Any ): """simple docstring""" __UpperCamelCase =model_classes[model_name]( include_top=__UpperCamelCase , weights='''imagenet''' , input_tensor=__UpperCamelCase , input_shape=__UpperCamelCase , pooling=__UpperCamelCase , classes=1_0_0_0 , classifier_activation='''softmax''' , ) __UpperCamelCase =original_model.trainable_variables __UpperCamelCase =original_model.non_trainable_variables __UpperCamelCase ={param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCamelCase =param.numpy() __UpperCamelCase =list(tf_params.keys() ) # Load HuggingFace model __UpperCamelCase =get_efficientnet_config(__UpperCamelCase ) __UpperCamelCase =EfficientNetForImageClassification(__UpperCamelCase ).eval() __UpperCamelCase =hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) __UpperCamelCase =rename_keys(__UpperCamelCase ) replace_params(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCamelCase =convert_image_processor(__UpperCamelCase ) __UpperCamelCase =preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCamelCase =hf_model(**__UpperCamelCase ) __UpperCamelCase =outputs.logits.detach().numpy() # Original model inference __UpperCamelCase =False __UpperCamelCase =CONFIG_MAP[model_name]['''image_size'''] __UpperCamelCase =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCamelCase =image.img_to_array(__UpperCamelCase ) __UpperCamelCase =np.expand_dims(__UpperCamelCase , axis=0 ) __UpperCamelCase =original_model.predict(__UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(__UpperCamelCase ): os.mkdir(__UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(__UpperCamelCase ) preprocessor.save_pretrained(__UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) __UpperCamelCase =F"""efficientnet-{model_name}""" preprocessor.push_to_hub(__UpperCamelCase ) hf_model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') __lowercase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
711
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __lowercase = logging.getLogger() def lowerCAmelCase (__UpperCamelCase : Path , __UpperCamelCase : list ): """simple docstring""" __UpperCamelCase ='''\n'''.join(__UpperCamelCase ) Path(__UpperCamelCase ).open('''w''' ).writelines(__UpperCamelCase ) __lowercase = '''patrickvonplaten/t5-tiny-random''' __lowercase = '''sshleifer/bart-tiny-random''' __lowercase = '''sshleifer/tiny-mbart''' __lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _lowercase ( __a ): """simple docstring""" def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' __UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' __UpperCamelCase =input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() __UpperCamelCase =[''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(UpperCamelCase__ , UpperCamelCase__ ) __UpperCamelCase =str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) __UpperCamelCase ='''translation_en_to_de''' if model == T5_TINY else '''summarization''' __UpperCamelCase =f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ): run_generate() assert Path(UpperCamelCase__ ).exists() # os.remove(Path(output_file_name)) def UpperCAmelCase_ ( self : str ) -> Tuple: '''simple docstring''' self.run_eval_tester(UpperCamelCase__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ) -> int: '''simple docstring''' self.run_eval_tester(UpperCamelCase__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Any ) -> Any: '''simple docstring''' __UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' __UpperCamelCase =input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() __UpperCamelCase ={ '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } __UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) __UpperCamelCase =str(tmp_dir / '''scores.json''' ) __UpperCamelCase =str(tmp_dir / '''val.target''' ) _dump_articles(UpperCamelCase__ , text['''en'''] ) _dump_articles(UpperCamelCase__ , text['''de'''] ) __UpperCamelCase ='''translation_en_to_de''' if model == T5_TINY else '''summarization''' __UpperCamelCase =f""" run_eval_search.py {model} {str(UpperCamelCase__ )} {str(UpperCamelCase__ )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ): 
with CaptureStdout() as cs: run_search() __UpperCamelCase =[''' num_beams | length_penalty''', model, '''Best score args'''] __UpperCamelCase =['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(UpperCamelCase__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(UpperCamelCase__ ).exists() os.remove(Path(UpperCamelCase__ ) )
296
0
_lowercase = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] _lowercase = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] _lowercase = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] _lowercase = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] _lowercase = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] _lowercase = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] _lowercase = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] _lowercase = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
659
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


_lowercase = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 512,
}


class __snake_case(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
659
1
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowercase__ : str = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : list = None ): lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : List[str] = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) lowerCAmelCase_ : int = os.path.abspath('examples' ) for item in os.listdir(__UpperCamelCase ): if item not in EXCLUDE_EXAMPLES: lowerCAmelCase_ : int = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ) and ".py" in item_path: with self.subTest( tested_script=__UpperCamelCase , feature_script=__UpperCamelCase , tested_section='main()' if parser_only else 'training_function()' , ): lowerCAmelCase_ : Dict = compare_against_test( os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) lowerCAmelCase_ : Any = '\n'.join(__UpperCamelCase ) if special_strings is not None: for string in special_strings: lowerCAmelCase_ : Tuple = diff.replace(__UpperCamelCase , '' ) self.assertEqual(__UpperCamelCase , '' ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): self.one_complete_example('complete_nlp_example.py' , __UpperCamelCase ) self.one_complete_example('complete_nlp_example.py' , __UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : List[str] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) lowerCAmelCase_ : Union[str, Any] = [ ' ' * 1_6 + '{\n\n', ' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 2_0 + '"f1": eval_metric["f1"],\n\n', ' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 2_0 + '"epoch": epoch,\n\n', ' ' * 1_6 + '},\n\n', ' ' * 1_6 + 'step=epoch,\n', ' ' * 1_2, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) self.one_complete_example('complete_cv_example.py' , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @mock.patch.dict(os.environ, {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = False @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ): super().setUpClass() lowerCAmelCase_ : Tuple = tempfile.mkdtemp() lowerCAmelCase_ : str = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase_ : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir 
) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : Optional[int] = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() lowerCAmelCase_ : Dict = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : Any = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() lowerCAmelCase_ : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase ) self.assertNotIn('epoch 0:' , __UpperCamelCase ) self.assertIn('epoch 1:' , __UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() lowerCAmelCase_ : int = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase ) if torch.cuda.is_available(): lowerCAmelCase_ : Any = torch.cuda.device_count() else: lowerCAmelCase_ : str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __UpperCamelCase ) self.assertIn('epoch 1:' , __UpperCamelCase ) else: self.assertIn('epoch 0:' , __UpperCamelCase ) self.assertIn('epoch 1:' , __UpperCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : Dict = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): lowerCAmelCase_ : Tuple = run_command(self._launch_args + testargs , return_stdout=__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = re.findall('({.+})' , __UpperCamelCase ) lowerCAmelCase_ : Tuple = [r for r in results if 'accuracy' in r][-1] lowerCAmelCase_ : Tuple = ast.literal_eval(__UpperCamelCase ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : Tuple = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: lowerCAmelCase_ : Any = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'tracking' ) ) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : Optional[int] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : Any = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
707
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger lowercase__ : Optional[Any] = get_logger(__name__) lowercase__ : Tuple = Path(__file__).parent / """model_card_template.md""" lowercase__ : Optional[Any] = uuida().hex lowercase__ : Any = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES lowercase__ : Tuple = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES lowercase__ : Dict = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def UpperCamelCase_ ( lowerCAmelCase__ : Union[Dict, str, None] = None ) -> str: """simple docstring""" lowerCAmelCase_ : Optional[int] = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): ua += "; " + user_agent return ua def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None ) -> Union[str, Any]: """simple docstring""" if token is None: lowerCAmelCase_ : Any = HfFolder.get_token() if organization is None: lowerCAmelCase_ : Union[str, Any] = whoami(lowerCAmelCase__ )['name'] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ) -> Optional[int]: """simple docstring""" if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.' 
) if hasattr(lowerCAmelCase__ , 'local_rank' ) and args.local_rank not in [-1, 0]: return lowerCAmelCase_ : List[str] = args.hub_token if hasattr(lowerCAmelCase__ , 'hub_token' ) else None lowerCAmelCase_ : List[Any] = get_full_repo_name(lowerCAmelCase__ , token=lowerCAmelCase__ ) lowerCAmelCase_ : Dict = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCAmelCase__ , model_name=lowerCAmelCase__ , repo_name=lowerCAmelCase__ , dataset_name=args.dataset_name if hasattr(lowerCAmelCase__ , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(lowerCAmelCase__ , 'gradient_accumulation_steps' ) else None ) , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(lowerCAmelCase__ , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCAmelCase__ , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCAmelCase__ , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCAmelCase__ , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCAmelCase__ , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCAmelCase__ , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(lowerCAmelCase__ , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCAmelCase__ , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , ) lowerCAmelCase_ : Tuple = os.path.join(args.output_dir , 'README.md' ) model_card.save(lowerCAmelCase__ ) def UpperCamelCase_ ( lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] = None ) -> Tuple: """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash lowerCAmelCase_ : Tuple = str(Path(lowerCAmelCase__ ).as_posix() ) lowerCAmelCase_ : Any = re.search(R'snapshots/([^/]+)/' , lowerCAmelCase__ ) if search is None: return None lowerCAmelCase_ : Tuple = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(lowerCAmelCase__ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
lowercase__ : int = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) lowercase__ : Optional[int] = os.path.join(hf_cache_home, """diffusers""") def UpperCamelCase_ ( lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None ) -> None: """simple docstring""" if new_cache_dir is None: lowerCAmelCase_ : Any = DIFFUSERS_CACHE if old_cache_dir is None: lowerCAmelCase_ : Optional[int] = old_diffusers_cache lowerCAmelCase_ : Optional[int] = Path(lowerCAmelCase__ ).expanduser() lowerCAmelCase_ : Dict = Path(lowerCAmelCase__ ).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): lowerCAmelCase_ : List[str] = new_cache_dir / old_blob_path.relative_to(lowerCAmelCase__ ) new_blob_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) os.replace(lowerCAmelCase__ , lowerCAmelCase__ ) try: os.symlink(lowerCAmelCase__ , lowerCAmelCase__ ) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). lowercase__ : Any = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): lowercase__ : int = 0 else: with open(cache_version_file) as f: try: lowercase__ : int = int(f.read()) except ValueError: lowercase__ : Any = 0 if cache_version < 1: lowercase__ : int = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: lowercase__ : int = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ' """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ' """the directory exists and can be written to.""" ) def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> str: """simple docstring""" if variant is not None: lowerCAmelCase_ : Any = weights_name.split('.' 
) lowerCAmelCase_ : List[Any] = splits[:-1] + [variant] + splits[-1:] lowerCAmelCase_ : Any = '.'.join(lowerCAmelCase__ ) return weights_name def UpperCamelCase_ ( lowerCAmelCase__ : List[str] , *, lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int]=None , ) -> str: """simple docstring""" lowerCAmelCase_ : Optional[int] = str(lowerCAmelCase__ ) if os.path.isfile(lowerCAmelCase__ ): return pretrained_model_name_or_path elif os.path.isdir(lowerCAmelCase__ ): if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ): # Load from a PyTorch checkpoint lowerCAmelCase_ : Dict = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ): lowerCAmelCase_ : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(lowerCAmelCase__ ).base_version ) >= version.parse('0.20.0' ) ): try: lowerCAmelCase_ : Dict = hf_hub_download( lowerCAmelCase__ , filename=_add_variant(lowerCAmelCase__ , lowerCAmelCase__ ) , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , lowerCAmelCase__ , ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(lowerCAmelCase__ , lowerCAmelCase__ )}' so that the correct variant file can be added." , lowerCAmelCase__ , ) try: # 2. 
Load model file as usual lowerCAmelCase_ : Optional[Any] = hf_hub_download( lowerCAmelCase__ , filename=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , user_agent=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.' ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " 'this model name. Check the model page at ' f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" )
317
0
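The user-agent builder in the diffusers utilities above chains version fragments into one telemetry string. A minimal standalone sketch of the same pattern, assuming placeholder constants (LIB_VERSION and SESSION_ID here are illustrative, not diffusers' real values):

import sys

# Placeholder constants standing in for the library's real version info.
LIB_VERSION = "0.0.0"
SESSION_ID = "deadbeef"


def build_user_agent(user_agent=None, telemetry_off=False):
    # Base string: library version, Python version, session id.
    ua = f"mylib/{LIB_VERSION}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if telemetry_off:
        return ua + "; telemetry/off"
    # Extra fields may arrive as a dict or a plain string.
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


print(build_user_agent({"pipeline": "text2img"}))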
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
469
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
469
1
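For reference, the sinusoidal-embedding recipe used by get_sinusoidal_embeddings above can be checked with plain numpy; the toy dimensions below are arbitrary and min_timescale is fixed at 1 as in the defaults:

import math

import numpy as np


def sinusoidal(timesteps, embedding_dim, max_timescale=1.0e4, freq_shift=1):
    half = embedding_dim // 2
    # Geometric progression of inverse timescales, as in the flax version.
    log_inc = math.log(max_timescale) / (half - freq_shift)
    inv_timescales = np.exp(np.arange(half) * -log_inc)
    angles = timesteps[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)


emb = sinusoidal(np.array([0.0, 1.0, 10.0]), 8)
print(emb.shape)  # (3, 8): batch of 3 timesteps, embedding_dim 8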
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
658
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = "https://openaipublic.azureedge.net/jukebox/models/" _snake_case = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def A ( _lowerCamelCase ): '''simple docstring''' if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: _lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: _lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: _lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: _lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: _lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: _lowerCAmelCase : int = key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: _lowerCAmelCase : int = key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." 
in key: _lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = {} import re _lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) _lowerCAmelCase : List[str] = re.compile( r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) _lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) _lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) _lowerCAmelCase : List[str] = re.compile( r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) _lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) _lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) _lowerCAmelCase : List[Any] = re.compile( r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) _lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ): _lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase ) _lowerCAmelCase : List[str] = regex_match.groups() _lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" _lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase ) elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ): _lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase ) _lowerCAmelCase : str = regex_match.groups() _lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]] _lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." 
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" _lowerCAmelCase : int = prefix + resnet_block _lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase ) elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ): _lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase ) _lowerCAmelCase : List[Any] = regex_match.groups() _lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" _lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ): _lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = regex_match.groups() _lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" _lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase ) elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ): _lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase ) _lowerCAmelCase : List[str] = regex_match.groups() _lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]] _lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." _lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" _lowerCAmelCase : Dict = prefix + resnet_block _lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase ) elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ): _lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = regex_match.groups() _lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" _lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ): _lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase ) _lowerCAmelCase : List[Any] = regex_match.groups() _lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" _lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase ) elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ): _lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase ) _lowerCAmelCase : List[str] = regex_match.groups() _lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]] _lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}." 
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" _lowerCAmelCase : List[Any] = prefix + resnet_block _lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase ) elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ): _lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase ) _lowerCAmelCase : Optional[Any] = regex_match.groups() _lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}" _lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase ) # keep original key else: _lowerCAmelCase : Optional[int] = original_key _lowerCAmelCase : Tuple = replace_key(_lowerCamelCase ) if F"{key_prefix}.{key}" not in model_state_dict or key is None: print(F"failed converting {original_key} to {key}, does not match" ) # handle missmatched shape elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape: _lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"] print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" ) _lowerCAmelCase : Tuple = original_key _lowerCAmelCase : List[Any] = original_key _lowerCAmelCase : Optional[int] = value return new_dict @torch.no_grad() def A ( _lowerCamelCase=None , _lowerCamelCase=None ): '''simple docstring''' for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ): _lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase ) os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase ) open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content ) _lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]] _lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = [] _lowerCAmelCase : List[Any] = {} for i, dict_name in enumerate(_lowerCamelCase ): _lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"] _lowerCAmelCase : Union[str, Any] = {} for k in old_dic.keys(): if k.endswith(".b" ): _lowerCAmelCase : Dict = old_dic[k] elif k.endswith(".w" ): _lowerCAmelCase : Tuple = old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: _lowerCAmelCase : str = old_dic[k] else: _lowerCAmelCase : Union[str, Any] = old_dic[k] _lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}" _lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase ) weight_dict.append(_lowerCamelCase ) _lowerCAmelCase : Optional[Any] = weight_dict.pop(0 ) model.vqvae.load_state_dict(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile: json.dump(_lowerCamelCase , _lowerCamelCase ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) return weight_dict if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jukebox-5b-lyrics", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="jukebox-5b-lyrics-converted", type=str, help="Path to the output PyTorch model directory.", ) _snake_case = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
658
1
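The stack-based next_greatest_element above runs in O(n) because every element is pushed and popped at most once. A compact trace of the same idea (toy array; variable names are mine):

arr = [4, 5, 2, 25]
stack, result = [], [-1] * len(arr)
for index in reversed(range(len(arr))):
    # Discard stack entries that cannot be the "next greater" for arr[index].
    while stack and stack[-1] <= arr[index]:
        stack.pop()
    if stack:
        result[index] = stack[-1]
    stack.append(arr[index])
print(result)  # [5, 25, 25, -1]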
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=3 , lowerCamelCase=2_24 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , ) -> int: '''simple docstring''' UpperCamelCase : Dict = size if size is not None else {"height": 18, "width": 18} UpperCamelCase : Optional[Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Union[str, Any] = num_channels UpperCamelCase : List[str] = image_size UpperCamelCase : Dict = min_resolution UpperCamelCase : int = max_resolution UpperCamelCase : str = do_resize UpperCamelCase : int = size UpperCamelCase : str = do_normalize UpperCamelCase : str = image_mean UpperCamelCase : Union[str, Any] = image_std def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE = ViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' UpperCamelCase : Optional[int] = EfficientFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "size" ) ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]: '''simple docstring''' UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input UpperCamelCase : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : Any = image_processor(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple 
docstring''' UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input UpperCamelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : str = image_processor(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input UpperCamelCase : Any = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched UpperCamelCase : Tuple = image_processor(lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
173
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
173
1
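AbstractDatasetReader above pins down the constructor contract and leaves read() abstract. A minimal concrete subclass, sketched under the assumption of a simple JSON-lines format (JsonLinesReader is illustrative, not the datasets library's real reader):

import json
from abc import ABC, abstractmethod


class AbstractReader(ABC):
    def __init__(self, path, split="train", **kwargs):
        self.path = path
        self.split = split
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        ...


class JsonLinesReader(AbstractReader):
    def read(self):
        # Eagerly load every record; a real reader would build an Arrow table.
        with open(self.path, encoding="utf-8") as f:
            return [json.loads(line) for line in f if line.strip()]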
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Optional[int] = 16 a_ : int = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ): model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __magic_name__ = 0 # We also need to keep track of the stating epoch so files are named properly __magic_name__ = 0 __magic_name__ = evaluate.load('''glue''' , '''mrpc''' ) __magic_name__ = num_epochs if args.partial_train_epoch is not None: __magic_name__ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1] __magic_name__ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __magic_name__ = int(snake_case_ ) + 1 __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.print('''resumed checkpoint performance:''' , snake_case_ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f: __magic_name__ = json.load(snake_case_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __magic_name__ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.loss __magic_name__ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __magic_name__ = f'epoch_{epoch}' __magic_name__ = os.path.join(args.output_dir , snake_case_ ) accelerator.save_state(snake_case_ ) __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __magic_name__ = accuracy __magic_name__ = lr_scheduler.get_lr()[0] __magic_name__ = optimizer.param_groups[0]['''lr'''] __magic_name__ = epoch __magic_name__ = overall_step accelerator.print(f'epoch {epoch}:' , snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , ) __magic_name__ = parser.parse_args() __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
678
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    # Match an optional 0/94/+94/0094 prefix, an operator digit 70-78 (no 73 or 79),
    # an optional single separator, and a seven-digit subscriber number.
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
678
1
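A few spot checks for the Sri Lankan phone pattern above; the pattern is copied from the code, the test numbers are made up:

import re

pattern = re.compile(
    r"^(?:0|94|\+94|0{2}94)"  # optional country prefix: 0, 94, +94 or 0094
    r"7(0|1|2|4|5|6|7|8)"     # operator digit (3 and 9 are not matched)
    r"(-| |)"                 # optional single separator
    r"\d{7}$"                 # seven-digit subscriber number
)

for number in ["0094702343221", "+94773283048", "075 7805541", "0957651234"]:
    print(number, bool(pattern.search(number)))  # last one is False: 79 is rejected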
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every ordered triple.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Two-pointer search over the sorted array.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
614
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Add the BOS/SEP markers manually, then encode without special tokens.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
614
1
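A hand-checkable run of the two-pointer search implemented by triplet_sum2 above (standalone restatement; array and target chosen so the answer is easy to verify):

def triplet_sum(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            # Sorted order lets us move exactly one pointer per step.
            if s < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)


print(triplet_sum([13, 29, 7, 23, 5], 35))  # (5, 7, 23)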
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): __snake_case : Dict = KandinskyImgaImgPipeline __snake_case : List[str] = ["prompt", "image_embeds", "negative_image_embeds", "image"] __snake_case : int = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] __snake_case : Any = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __snake_case : List[str] = False @property def UpperCamelCase ( self: str ): '''simple docstring''' return 32 @property def UpperCamelCase ( self: Any ): '''simple docstring''' return 32 @property def UpperCamelCase ( self: str ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase ( self: int ): '''simple docstring''' return 100 @property def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def UpperCamelCase ( self: List[Any] ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) _SCREAMING_SNAKE_CASE = MultilingualCLIP(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = text_encoder.eval() return text_encoder @property def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ ) return model @property def UpperCamelCase ( self: Tuple ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", 
"num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.dummy_text_encoder _SCREAMING_SNAKE_CASE = self.dummy_tokenizer _SCREAMING_SNAKE_CASE = self.dummy_unet _SCREAMING_SNAKE_CASE = self.dummy_movq _SCREAMING_SNAKE_CASE = { """num_train_timesteps""": 1_000, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _SCREAMING_SNAKE_CASE = DDIMScheduler(**UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: str=0 ): '''simple docstring''' _SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ ) # create init_image _SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) ) if str(UpperCAmelCase_ ).startswith("""mps""" ): _SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ ) else: _SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def UpperCamelCase ( self: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = """cpu""" _SCREAMING_SNAKE_CASE = self.get_dummy_components() _SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) ) _SCREAMING_SNAKE_CASE = output.images _SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0] _SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _SCREAMING_SNAKE_CASE = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __UpperCAmelCase (unittest.TestCase ): def UpperCamelCase ( self: 
Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self: List[str] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _SCREAMING_SNAKE_CASE = """A red cartoon frog, 4k""" _SCREAMING_SNAKE_CASE = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) _SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior( UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _SCREAMING_SNAKE_CASE = pipeline( UpperCAmelCase_ , image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) _SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
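# A minimal end-to-end usage sketch (added; not part of the test file above).
# It mirrors the slow test: the prior pipeline turns the prompt into CLIP image
# embeddings, which then condition the img2img decoder together with the init
# image. The un-mangled class names (KandinskyPriorPipeline /
# KandinskyImg2ImgPipeline) and model ids are assumptions taken from the public
# diffusers API; adjust them if your diffusers version differs.
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# The prior maps the text prompt to positive/negative image embeddings...
image_embeds, negative_image_embeds = pipe_prior("A red cartoon frog, 4k").to_tuple()
# ...which drive the img2img decoder; strength controls how far it moves from init_image.
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
    num_inference_steps=100,
    height=768,
    width=768,
).images[0]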
569
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Gamma function of ``num`` via numerical integration.

    Gamma(num) = integral from 0 to infinity of x**(num - 1) * e**(-x) dx
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """The integrand x**(z - 1) * e**(-x) of the Gamma integral."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
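# Quick sanity check (added for illustration, not part of the original file):
# for positive integers, Gamma(n) == (n - 1)!, so Gamma(5) should be 24.
from math import isclose

assert isclose(gamma(5), 24.0, rel_tol=1e-6)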
569
1
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
483
"""Configuration for a "masked" BERT model whose linear layers are prunable."""

import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """Replicates BertConfig with extra parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
116
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : Dict = { """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class A__ ( A__ ): """simple docstring""" _lowercase = 'deta' _lowercase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[Any] , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=900 , lowerCamelCase__ : Union[str, Any]=2_048 , lowerCamelCase__ : Dict=6 , lowerCamelCase__ : str=2_048 , lowerCamelCase__ : Tuple=8 , lowerCamelCase__ : List[Any]=6 , lowerCamelCase__ : Dict=1_024 , lowerCamelCase__ : int=8 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Tuple=256 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : Union[str, Any]=0.02 , lowerCamelCase__ : int=1.0 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Tuple="sine" , lowerCamelCase__ : Any=5 , lowerCamelCase__ : str=4 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Union[str, Any]=300 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[Any]=1 , lowerCamelCase__ : Dict=5 , lowerCamelCase__ : str=2 , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : Union[str, Any]=0.25 , **lowerCamelCase__ : int , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) a__ : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : Tuple = backbone_config.pop("model_type" ) a__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a__ : Tuple = config_class.from_dict(lowerCamelCase__ ) a__ : int = backbone_config a__ : int = num_queries a__ : Dict = max_position_embeddings a__ : Union[str, Any] = d_model a__ : List[Any] = encoder_ffn_dim a__ : int = encoder_layers a__ : Dict = encoder_attention_heads a__ : Dict = decoder_ffn_dim a__ : List[str] = decoder_layers a__ : str = decoder_attention_heads a__ : Optional[int] = dropout a__ : str = attention_dropout a__ : Optional[Any] = activation_dropout a__ : Any = activation_function a__ : Dict = init_std a__ : Tuple = init_xavier_std a__ : str = encoder_layerdrop a__ : Dict = auxiliary_loss a__ : List[Any] = position_embedding_type # deformable attributes a__ : Dict = num_feature_levels a__ : str = encoder_n_points a__ : Tuple = decoder_n_points a__ : List[Any] = two_stage a__ : Dict = two_stage_num_proposals a__ : int = with_box_refine a__ : int = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." 
) # Hungarian matcher a__ : List[str] = class_cost a__ : Optional[Any] = bbox_cost a__ : List[str] = giou_cost # Loss coefficients a__ : Any = mask_loss_coefficient a__ : Union[str, Any] = dice_loss_coefficient a__ : List[str] = bbox_loss_coefficient a__ : Any = giou_loss_coefficient a__ : Optional[int] = eos_coefficient a__ : Tuple = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ ) @property def _UpperCamelCase( self : List[str] ): return self.encoder_attention_heads @property def _UpperCamelCase( self : Tuple ): return self.d_model def _UpperCamelCase( self : List[Any] ): a__ : Dict = copy.deepcopy(self.__dict__ ) a__ : List[Any] = self.backbone_config.to_dict() a__ : Optional[Any] = self.__class__.model_type return output
151
from copy import deepcopy class A__ : """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase__ : list[int] | None = None , lowerCamelCase__ : int | None = None ): if arr is None and size is not None: a__ : Union[str, Any] = size a__ : Optional[Any] = [0] * size elif arr is not None: self.init(lowerCamelCase__ ) else: raise ValueError("Either arr or size must be specified" ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : list[int] ): a__ : Any = len(lowerCamelCase__ ) a__ : List[Any] = deepcopy(lowerCamelCase__ ) for i in range(1 , self.size ): a__ : Union[str, Any] = self.next_(lowerCamelCase__ ) if j < self.size: self.tree[j] += self.tree[i] def _UpperCamelCase( self : Tuple ): a__ : List[str] = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): a__ : Optional[Any] = self.next_(lowerCamelCase__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def _UpperCamelCase( lowerCamelCase__ : int ): return index + (index & (-index)) @staticmethod def _UpperCamelCase( lowerCamelCase__ : int ): return index - (index & (-index)) def _UpperCamelCase( self : str , lowerCamelCase__ : int , lowerCamelCase__ : int ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value a__ : Optional[int] = self.next_(lowerCamelCase__ ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int ): self.add(lowerCamelCase__ , value - self.get(lowerCamelCase__ ) ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int ): if right == 0: return 0 a__ : Tuple = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] a__ : List[Any] = self.prev(lowerCamelCase__ ) return result def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ): return self.prefix(lowerCamelCase__ ) - self.prefix(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int ): return self.query(lowerCamelCase__ , index + 1 ) def _UpperCamelCase( self : int , lowerCamelCase__ : int ): value -= self.tree[0] if value < 0: return -1 a__ : Union[str, Any] = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 a__ : Tuple = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
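# Hypothetical usage (added): the class above is a Fenwick / binary indexed
# tree, but its mangled method names collide, so this sketch assumes the
# descriptive names of the canonical implementation it derives from:
# FenwickTree with add (point add), update (point set), prefix (sum over
# [0, right)) and query (sum over [left, right)).
f = FenwickTree(arr=[1, 2, 3, 4, 5])
assert f.prefix(3) == 1 + 2 + 3   # prefix sums are right-exclusive
f.add(0, 10)                      # point update: arr[0] += 10
assert f.query(0, 2) == 11 + 2    # range sum over [0, 2)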
151
1
"""simple docstring""" def __magic_name__ ( _lowerCamelCase : int = 1_0**1_2 ): __a : Union[str, Any] = 1 __a : Union[str, Any] = 0 __a : int = 1 __a : Tuple = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f'{solution() = }')
581
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowercase__ = logging.get_logger(__name__) lowercase__ = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class SCREAMING_SNAKE_CASE__ ( __snake_case ): _lowerCAmelCase = "bloom" _lowerCAmelCase = ["past_key_values"] _lowerCAmelCase = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__(self , _lowercase=250880 , _lowercase=64 , _lowercase=2 , _lowercase=8 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=1 , _lowercase=2 , _lowercase=False , _lowercase=0.0 , _lowercase=0.0 , _lowercase=1 , _lowercase=False , **_lowercase , ): '''simple docstring''' __a : Tuple = vocab_size # Backward compatibility with n_embed kwarg __a : Tuple = kwargs.pop("""n_embed""" , _lowercase ) __a : Optional[Any] = hidden_size if n_embed is None else n_embed __a : Optional[int] = n_layer __a : Optional[int] = n_head __a : Union[str, Any] = layer_norm_epsilon __a : Optional[Any] = initializer_range __a : List[str] = use_cache __a : List[str] = pretraining_tp __a : Optional[Any] = apply_residual_connection_post_layernorm __a : Optional[int] = hidden_dropout __a : List[str] = attention_dropout __a : Tuple = bos_token_id __a : Tuple = eos_token_id __a : List[str] = slow_but_exact super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) class SCREAMING_SNAKE_CASE__ ( __snake_case ): _lowerCAmelCase = version.parse("1.12" ) def __init__(self , _lowercase , _lowercase = "default" , _lowercase = None , _lowercase = False , ): '''simple docstring''' super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase ) if not getattr(self._config , """pad_token_id""" , _lowercase ): # TODO: how to do that better? __a : Any = 0 @property def lowerCAmelCase__(self ): '''simple docstring''' __a : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_lowercase , direction="""inputs""" , inverted_values_shape=_lowercase ) __a : int = {0: """batch""", 1: """past_sequence + sequence"""} else: __a : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCAmelCase__(self ): '''simple docstring''' return self._config.n_layer @property def lowerCAmelCase__(self ): '''simple docstring''' return self._config.n_head @property def lowerCAmelCase__(self ): '''simple docstring''' return 1e-3 def lowerCAmelCase__(self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ): '''simple docstring''' __a : Union[str, Any] = super(_lowercase , self ).generate_dummy_inputs( _lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase ) # We need to order the input in the way they appears in the forward() __a : List[str] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __a , __a : Optional[int] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __a : str = seqlen + 2 __a : List[Any] = self._config.hidden_size // self.num_attention_heads __a : List[Any] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __a : Optional[int] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __a : int = [ (torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers ) ] __a : int = common_inputs["""attention_mask"""] if self.use_past: __a : int = ordered_inputs["""attention_mask"""].dtype __a : Union[str, Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase__(self ): '''simple docstring''' return 13
581
1
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
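# Added for illustration: 360 = 2**3 * 3**2 * 5, so the factor list carries
# each prime once per power.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]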
704
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ): '''simple docstring''' @register_to_config def __init__( self : Any , lowerCamelCase : int = 128 , lowerCamelCase : int = 256 , lowerCamelCase : float = 2000.0 , lowerCamelCase : int = 768 , lowerCamelCase : int = 12 , lowerCamelCase : int = 12 , lowerCamelCase : int = 64 , lowerCamelCase : int = 2048 , lowerCamelCase : float = 0.1 , ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase = nn.Sequential( nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , ) _UpperCAmelCase = nn.Embedding(lowerCamelCase , lowerCamelCase ) _UpperCAmelCase = False _UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) _UpperCAmelCase = nn.Dropout(p=lowerCamelCase ) _UpperCAmelCase = nn.ModuleList() for lyr_num in range(lowerCamelCase ): # FiLM conditional T5 decoder _UpperCAmelCase = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase ) self.decoders.append(lowerCamelCase ) _UpperCAmelCase = TaLayerNorm(lowerCamelCase ) _UpperCAmelCase = nn.Dropout(p=lowerCamelCase ) _UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) def lowerCamelCase ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCamelCase ( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _UpperCAmelCase = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _UpperCAmelCase = self.conditioning_emb(lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _UpperCAmelCase = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _UpperCAmelCase = torch.broadcast_to( torch.arange(lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _UpperCAmelCase = self.position_encoding(lowerCamelCase ) _UpperCAmelCase = self.continuous_inputs_projection(lowerCamelCase ) inputs += position_encodings _UpperCAmelCase = self.dropout(lowerCamelCase ) # decoder: No padding present. _UpperCAmelCase = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_UpperCAmelCase = [(x, self.encoder_decoder_mask(lowerCamelCase , lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings _UpperCAmelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _UpperCAmelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _UpperCAmelCase = lyr( lowerCamelCase , conditioning_emb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )[0] _UpperCAmelCase = self.decoder_norm(lowerCamelCase ) _UpperCAmelCase = self.post_dropout(lowerCamelCase ) _UpperCAmelCase = self.spec_out(lowerCamelCase ) return spec_out class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any=1E-6 ) -> int: """simple docstring""" super().__init__() _UpperCAmelCase = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) ) def lowerCamelCase ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : Any=None , lowerCamelCase : Optional[int]=None , ) -> str: """simple docstring""" _UpperCAmelCase = self.layer[0]( lowerCamelCase , conditioning_emb=lowerCamelCase , attention_mask=lowerCamelCase , ) if encoder_hidden_states is not None: _UpperCAmelCase = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _UpperCAmelCase = self.layer[1]( lowerCamelCase , key_value_states=lowerCamelCase , attention_mask=lowerCamelCase , ) # Apply Film Conditional Feed Forward layer _UpperCAmelCase = self.layer[-1](lowerCamelCase , lowerCamelCase ) return (hidden_states,) class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int ) -> Union[str, Any]: """simple docstring""" super().__init__() _UpperCAmelCase = TaLayerNorm(lowerCamelCase ) _UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase ) _UpperCAmelCase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase ) _UpperCAmelCase = nn.Dropout(lowerCamelCase ) def lowerCamelCase ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Dict=None , ) -> List[Any]: """simple docstring""" # pre_self_attention_layer_norm _UpperCAmelCase = self.layer_norm(lowerCamelCase ) if conditioning_emb is not None: _UpperCAmelCase = self.FiLMLayer(lowerCamelCase , lowerCamelCase ) # Self-attention block _UpperCAmelCase = self.attention(lowerCamelCase ) _UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase ) return hidden_states class 
SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> Dict: """simple docstring""" super().__init__() _UpperCAmelCase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase ) _UpperCAmelCase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase ) _UpperCAmelCase = nn.Dropout(lowerCamelCase ) def lowerCamelCase ( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int=None , lowerCamelCase : Optional[Any]=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.layer_norm(lowerCamelCase ) _UpperCAmelCase = self.attention( lowerCamelCase , encoder_hidden_states=lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) _UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase ) return layer_output class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Optional[Any]: """simple docstring""" super().__init__() _UpperCAmelCase = TaDenseGatedActDense(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase ) _UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase ) _UpperCAmelCase = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase ) _UpperCAmelCase = nn.Dropout(lowerCamelCase ) def lowerCamelCase ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : str=None ) -> Dict: """simple docstring""" _UpperCAmelCase = self.layer_norm(lowerCamelCase ) if conditioning_emb is not None: _UpperCAmelCase = self.film(lowerCamelCase , lowerCamelCase ) _UpperCAmelCase = self.DenseReluDense(lowerCamelCase ) _UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[str] ) -> Any: """simple docstring""" super().__init__() _UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) _UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) _UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) _UpperCAmelCase = nn.Dropout(lowerCamelCase ) _UpperCAmelCase = NewGELUActivation() def lowerCamelCase ( self : Tuple , lowerCamelCase : Dict ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.act(self.wi_a(lowerCamelCase ) ) _UpperCAmelCase = self.wi_a(lowerCamelCase ) _UpperCAmelCase = hidden_gelu * hidden_linear _UpperCAmelCase = self.dropout(lowerCamelCase ) _UpperCAmelCase = self.wo(lowerCamelCase ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : str=1E-6 ) -> Optional[int]: """simple docstring""" super().__init__() _UpperCAmelCase = nn.Parameter(torch.ones(lowerCamelCase ) ) _UpperCAmelCase = eps def lowerCamelCase ( self : List[str] , lowerCamelCase : int ) -> Union[str, Any]: """simple docstring""" # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _UpperCAmelCase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase ) _UpperCAmelCase = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _UpperCAmelCase = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : torch.Tensor ) -> torch.Tensor: """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowerCamelCase , 3.0 )) )) class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , lowerCamelCase : Any , lowerCamelCase : Any ) -> Optional[int]: """simple docstring""" super().__init__() _UpperCAmelCase = nn.Linear(lowerCamelCase , out_features * 2 , bias=lowerCamelCase ) def lowerCamelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.scale_bias(lowerCamelCase ) _UpperCAmelCase , _UpperCAmelCase = torch.chunk(lowerCamelCase , 2 , -1 ) _UpperCAmelCase = x * (1 + scale) + shift return x
402
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
503
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase__ : '''simple docstring''' @property def _lowerCamelCase ( self) -> Tuple: return self.get_dummy_input() @property def _lowerCamelCase ( self) -> List[Any]: if self.block_type == "down": return (4, 3_2, 1_6, 1_6) elif self.block_type == "mid": return (4, 3_2, 3_2, 3_2) elif self.block_type == "up": return (4, 3_2, 6_4, 6_4) raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.") def _lowerCamelCase ( self , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , ) -> Dict: _A : Tuple = 4 _A : Optional[Any] = 3_2 _A : Optional[int] = (3_2, 3_2) _A : Dict = torch.manual_seed(0) _A : List[Any] = torch.device(__lowerCamelCase) _A : Union[str, Any] = (batch_size, num_channels) + sizes _A : int = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase) _A : Optional[int] = {"hidden_states": hidden_states} if include_temb: _A : Dict = 1_2_8 _A : Any = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase) if include_res_hidden_states_tuple: _A : str = torch.manual_seed(1) _A : str = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase),) if include_encoder_hidden_states: _A : Any = floats_tensor((batch_size, 3_2, 3_2)).to(__lowerCamelCase) if include_skip_sample: _A : List[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase) return dummy_input def _lowerCamelCase ( self) -> Optional[Any]: _A : int = { "in_channels": 3_2, "out_channels": 3_2, "temb_channels": 1_2_8, } if self.block_type == "up": _A : Optional[Any] = 3_2 if self.block_type == "mid": init_dict.pop("out_channels") _A : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A , _A : Optional[int] = self.prepare_init_args_and_inputs_for_common() _A : int = self.block_class(**__lowerCamelCase) unet_block.to(__lowerCamelCase) unet_block.eval() with torch.no_grad(): _A : Any = unet_block(**__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase): _A : Optional[Any] = output[0] self.assertEqual(output.shape , self.output_shape) _A : Optional[int] = output[0, -1, -3:, -3:] _A : Dict = torch.tensor(__lowerCamelCase).to(__lowerCamelCase) assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps") def _lowerCamelCase ( self) -> Dict: _A , _A : Optional[int] = self.prepare_init_args_and_inputs_for_common() _A : Optional[int] = self.block_class(**__lowerCamelCase) model.to(__lowerCamelCase) model.train() _A : Tuple = model(**__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase): _A : List[Any] = output[0] _A : Any = torch.device(__lowerCamelCase) _A : Any = randn_tensor(output.shape , device=__lowerCamelCase) _A : Optional[int] = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase) loss.backward()
503
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : List[str] = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : int = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[int] = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[int] = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
707
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # Weight-0 edges go to the front of the deque, weight-1 edges
                # to the back, which keeps the deque sorted by distance.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
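# Example usage (added): 0-1 BFS yields Dijkstra-like shortest paths in
# O(V + E) when all edge weights are 0 or 1.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)
assert g.get_shortest_path(0, 3) == 1  # 0 -> 1 (0) -> 2 (1) -> 3 (0)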
584
0
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x ≡ r1 (mod n1) and x ≡ r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
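# Worked example (added): find x with x ≡ 1 (mod 5) and x ≡ 3 (mod 7).
# gcd(5, 7) = 1, so the CRT gives a unique solution modulo 35.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31   # 31 % 5 == 1 and 31 % 7 == 3
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31  # same answer via inverses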
598
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
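# Hypothetical usage sketch (added; not from the source): PipelineTool.__call__
# chains encode -> forward -> decode and downloads the default checkpoint on
# first use. Whisper expects a raw 16 kHz waveform, so a file path would need
# to be loaded into an array first.
import numpy as np

tool = SpeechToTextTool()
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
print(tool(waveform))  # -> the transcribed string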
598
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig UpperCAmelCase_ = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class lowerCamelCase__( _lowercase): UpperCAmelCase__ : Tuple = '''albert''' def __init__( self: Any , UpperCamelCase_: List[str]=3_00_00 , UpperCamelCase_: Tuple=1_28 , UpperCamelCase_: Tuple=40_96 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Dict=1 , UpperCamelCase_: Optional[int]=64 , UpperCamelCase_: str=1_63_84 , UpperCamelCase_: Union[str, Any]=1 , UpperCamelCase_: List[Any]="gelu_new" , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: Any=0 , UpperCamelCase_: Dict=5_12 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: Optional[int]=1E-12 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[str]=3 , **UpperCamelCase_: List[Any] , ): super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) __lowerCamelCase = vocab_size __lowerCamelCase = embedding_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_hidden_groups __lowerCamelCase = num_attention_heads __lowerCamelCase = inner_group_num __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = classifier_dropout_prob __lowerCamelCase = position_embedding_type class lowerCamelCase__( _lowercase): @property def lowerCAmelCase__ ( self: List[str] ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
700
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
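# Minimal illustration (added): a PretrainedConfig subclass is a plain
# container, so overridden hyperparameters survive a round trip to a dict.
config = NezhaConfig(hidden_size=256, num_hidden_layers=4)
assert config.to_dict()["hidden_size"] == 256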
80
0
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowercase_ = _symbol_database.Default() lowercase_ = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) lowercase_ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowercase_ = None lowercase_ = B'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowercase_ = 45 lowercase_ = 15_81 lowercase_ = 15_17 lowercase_ = 15_70 lowercase_ = 15_84 lowercase_ = 17_93 lowercase_ = 17_95 lowercase_ = 19_16 lowercase_ = 18_64 lowercase_ = 19_05 lowercase_ = 19_19 lowercase_ = 24_29 lowercase_ = 22_08 lowercase_ = 24_18 lowercase_ = 23_23 lowercase_ = 24_07 # @@protoc_insertion_point(module_scope)
562
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class A : '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=13 , _UpperCamelCase : Union[str, Any]=7 , _UpperCamelCase : List[str]=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Any=True , _UpperCamelCase : Union[str, Any]=99 , _UpperCamelCase : Tuple=32 , _UpperCamelCase : List[Any]=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Any=0.0 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=512 , _UpperCamelCase : str=16 , _UpperCamelCase : str=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : int=4 , _UpperCamelCase : Tuple=None , ): _lowercase: Any = parent _lowercase: int = batch_size _lowercase: Tuple = seq_length _lowercase: Any = is_training _lowercase: Any = use_input_mask _lowercase: Union[str, Any] = use_token_type_ids _lowercase: int = use_labels _lowercase: int = vocab_size _lowercase: int = hidden_size _lowercase: Any = num_hidden_layers _lowercase: Tuple = num_attention_heads _lowercase: List[str] = intermediate_multiple_size _lowercase: Dict = hidden_act _lowercase: Optional[int] = hidden_dropout _lowercase: Optional[int] = attention_dropout _lowercase: Dict = weight_tying _lowercase: Union[str, Any] = max_position_embeddings _lowercase: str = type_vocab_size _lowercase: str = type_sequence_label_size _lowercase: Optional[int] = initializer_range _lowercase: List[Any] = num_labels _lowercase: Any = num_choices _lowercase: Optional[Any] = scope def UpperCAmelCase__ ( self : Optional[Any]): _lowercase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowercase: Optional[Any] = None if self.use_input_mask: _lowercase: List[str] = random_attention_mask([self.batch_size, self.seq_length]) _lowercase: Dict = None if self.use_labels: _lowercase: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowercase: Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self : Optional[int]): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : int): _lowercase , _lowercase , _lowercase , _lowercase: int = self.prepare_config_and_inputs() _lowercase: str = True return config, input_ids, input_mask, token_labels def UpperCAmelCase__ ( self : Optional[int] , 
_UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict): _lowercase: Dict = GPTNeoXJapaneseModel(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowercase: Optional[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase) _lowercase: Optional[int] = model(_UpperCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any): _lowercase: Tuple = True _lowercase: Optional[int] = GPTNeoXJapaneseModel(_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowercase: int = model(_UpperCamelCase , attention_mask=_UpperCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str): _lowercase: Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() _lowercase: Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any): _lowercase: Tuple = True _lowercase: Optional[Any] = GPTNeoXJapaneseForCausalLM(config=_UpperCamelCase) model.to(_UpperCamelCase) model.eval() # first forward pass _lowercase: int = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase) _lowercase: str = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowercase: List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size) _lowercase: Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and _lowercase: List[Any] = torch.cat([input_ids, next_tokens] , dim=-1) _lowercase: List[Any] = torch.cat([input_mask, next_mask] , dim=-1) _lowercase: Union[str, Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase) _lowercase: Optional[int] = output_from_no_past["hidden_states"][0] _lowercase: Tuple = model( _UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )["hidden_states"][0] # select random slice _lowercase: Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1]).item() _lowercase: Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _lowercase: Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3)) def UpperCAmelCase__ ( self : Dict): _lowercase: Union[str, Any] = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase , _lowercase: Tuple = config_and_inputs _lowercase: Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase : List[str] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() 
else () lowerCamelCase : Optional[int] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () lowerCamelCase : List[Any] = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) lowerCamelCase : int = False lowerCamelCase : Optional[int] = False lowerCamelCase : int = False lowerCamelCase : List[str] = False def UpperCAmelCase__ ( self : Tuple): _lowercase: Optional[int] = GPTNeoXJapaneseModelTester(self) _lowercase: List[str] = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37) def UpperCAmelCase__ ( self : Optional[Any]): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str]): _lowercase , _lowercase , _lowercase , _lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : Optional[int]): _lowercase , _lowercase , _lowercase , _lowercase: int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : Optional[Any]): # This regression test was failing with PyTorch < 1.3 _lowercase , _lowercase , _lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_decoder() _lowercase: int = None self.model_tester.create_and_check_model_as_decoder(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : Optional[Any]): _lowercase , _lowercase , _lowercase , _lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : str): _lowercase: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase) @slow def UpperCAmelCase__ ( self : Any): _lowercase: List[str] = "abeja/gpt-neox-japanese-2.7b" _lowercase: Dict = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] _lowercase: Union[str, Any] = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] _lowercase: str = GPTNeoXJapaneseTokenizer.from_pretrained(_UpperCamelCase) _lowercase: Dict = GPTNeoXJapaneseForCausalLM.from_pretrained(_UpperCamelCase) _lowercase: List[Any] = [] for prompt in prompts: _lowercase: List[str] = tokenizer(_UpperCamelCase , return_tensors="pt").input_ids _lowercase: List[Any] = model.generate(_UpperCamelCase , max_length=50) _lowercase: str = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase) predicted_outputs += generated_string self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
226
0
class DisjointSet:
    """Disjoint-set (union-find) over sets with known initial sizes, using union by rank."""

    def __init__(self, set_counts):
        # set_counts[i] is the initial number of elements in set i
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Merge the sets containing src and dst; returns False if they are already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the representative of disj_set, compressing the path on the way up."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
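# A minimal usage sketch for the union-find class above (the class and method
# names follow the cleaned-up rewrite; the upstream source may use different ones):
ds = DisjointSet([1, 2, 3])
assert ds.merge(0, 1) is True    # sets 0 and 1 join; the union has size 3
assert ds.merge(0, 1) is False   # already in the same set
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 3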
701
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    # Probability density of the normal distribution N(mu, sigma^2) at x.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
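# A quick numerical sanity check for the density above (a sketch; `gaussian` is
# the name chosen in the rewrite):
import numpy as np

xs = np.linspace(-10, 10, 10_001)
area = gaussian(xs).sum() * (xs[1] - xs[0])  # a density should integrate to ~1
assert abs(area - 1.0) < 1e-6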
72
0
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    if not isinstance(data, bytes):
        raise TypeError(f"a bytes-like object is required, not '{data.__class__.__name__}'")

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: bytes) -> bytes:
    """Decodes data according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
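# A small cross-check of the helpers above against the standard library
# (a sketch; `base64_encode`/`base64_decode` are the names used in the rewrite):
import base64

message = b"Hello, World!"
assert base64_encode(message) == base64.b64encode(message)
assert base64_decode(base64_encode(message)) == message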
251
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
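# For reference, a programmatic invocation sketch of the helper above. A
# `write_basic_config` helper is also re-exported from `accelerate.utils` in
# recent releases, though the exact import path may vary by version:
from accelerate.utils import write_basic_config

# Writes a default cluster config (e.g. default_config.yaml under the HF cache)
# unless one already exists at the save location.
write_basic_config(mixed_precision="fp16")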
251
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
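# A hedged sketch of driving the benchmark programmatically instead of via CLI
# flags (the keyword names below follow `TensorFlowBenchmarkArguments` as I
# understand them; verify against your installed transformers version):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
)
results = TensorFlowBenchmark(args=args).run()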
37
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
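# The `_LazyModule` pattern above defers heavy submodule imports until an
# attribute is first accessed. A minimal standalone sketch of the idea
# (simplified for illustration, not the actual transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value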
37
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _A = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
505
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _SCREAMING_SNAKE_CASE : Union[str, Any] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class A : '''simple docstring''' lowerCamelCase : Union[str, Any] = PegasusConfig lowerCamelCase : Any = {} lowerCamelCase : int = """gelu""" def __init__( self : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Dict=True , _UpperCamelCase : Dict=False , _UpperCamelCase : Dict=99 , _UpperCamelCase : str=32 , _UpperCamelCase : Dict=5 , _UpperCamelCase : str=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Any=20 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : int=1 , _UpperCamelCase : List[str]=0 , ): _lowercase: str = parent _lowercase: int = batch_size _lowercase: Optional[Any] = seq_length _lowercase: Dict = is_training _lowercase: List[str] = use_labels _lowercase: Dict = vocab_size _lowercase: Optional[Any] = hidden_size _lowercase: Optional[int] = num_hidden_layers _lowercase: int = num_attention_heads _lowercase: Tuple = intermediate_size _lowercase: Optional[int] = hidden_dropout_prob _lowercase: Union[str, Any] = attention_probs_dropout_prob _lowercase: Tuple = max_position_embeddings _lowercase: Optional[Any] = eos_token_id _lowercase: List[Any] = pad_token_id _lowercase: int = bos_token_id def UpperCAmelCase__ ( self : Tuple): _lowercase: int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) _lowercase: str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) _lowercase: List[str] = np.concatenate([input_ids, eos_tensor] , axis=1) _lowercase: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowercase: Tuple = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _lowercase: Any = prepare_pegasus_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return config, inputs_dict def UpperCAmelCase__ ( self : int , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : str): _lowercase: List[Any] = 20 _lowercase: Optional[int] = model_class_name(_UpperCamelCase) _lowercase: Union[str, Any] = model.encode(inputs_dict["input_ids"]) _lowercase , _lowercase: List[str] = ( 
inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _lowercase: Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) _lowercase: Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4") _lowercase: Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase: int = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowercase: List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") _lowercase: str = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , ) _lowercase: Optional[Any] = model.decode(_UpperCamelCase , _UpperCamelCase) _lowercase: List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}") def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[Any]): _lowercase: Optional[Any] = 20 _lowercase: List[Any] = model_class_name(_UpperCamelCase) _lowercase: str = model.encode(inputs_dict["input_ids"]) _lowercase , _lowercase: Tuple = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _lowercase: int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) _lowercase: Any = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) _lowercase: Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase: List[str] = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowercase: int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4") _lowercase: Dict = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) _lowercase: Tuple = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase) _lowercase: int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}") def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , ): if attention_mask is None: _lowercase: int = np.not_equal(__magic_name__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _lowercase: Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class A ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase : Dict = ( ( 
FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowerCamelCase : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowerCamelCase : Tuple = True lowerCamelCase : List[str] = False lowerCamelCase : int = False lowerCamelCase : Dict = False def UpperCAmelCase__ ( self : List[Any]): _lowercase: Any = FlaxPegasusModelTester(self) _lowercase: Optional[int] = ConfigTester(self , config_class=_UpperCamelCase) def UpperCAmelCase__ ( self : str): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Tuple): _lowercase , _lowercase: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : Tuple): _lowercase , _lowercase: List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def UpperCAmelCase__ ( self : Optional[int]): _lowercase , _lowercase: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _lowercase: Union[str, Any] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase) _lowercase: Union[str, Any] = model_class(_UpperCamelCase) @jax.jit def encode_jitted(_UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : int): return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase) with self.subTest("JIT Enabled"): _lowercase: Tuple = encode_jitted(**_UpperCamelCase).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): _lowercase: Tuple = encode_jitted(**_UpperCamelCase).to_tuple() self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) def UpperCAmelCase__ ( self : Any): _lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _lowercase: Union[str, Any] = model_class(_UpperCamelCase) _lowercase: Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"]) _lowercase: int = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(_UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict): return model.decode( decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , ) with self.subTest("JIT Enabled"): _lowercase: Dict = decode_jitted(**_UpperCamelCase).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): _lowercase: Union[str, Any] = decode_jitted(**_UpperCamelCase).to_tuple() self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) @slow def UpperCAmelCase__ ( self : str): for model_class_name in self.all_model_classes: _lowercase: Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_UpperCamelCase) _lowercase: Union[str, Any] = np.ones((1, 1)) _lowercase: List[str] = 
model(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) @slow def UpperCAmelCase__ ( self : Optional[Any]): _lowercase: Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum") _lowercase: int = PegasusTokenizer.from_pretrained("google/pegasus-xsum") _lowercase: int = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] _lowercase: str = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] _lowercase: Any = tokenizer(_UpperCamelCase , return_tensors="np" , truncation=_UpperCamelCase , max_length=512 , padding=_UpperCamelCase) _lowercase: Union[str, Any] = model.generate(**_UpperCamelCase , num_beams=2).sequences _lowercase: Optional[int] = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase) assert tgt_text == decoded
226
0
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase (lowerCAmelCase__ , unittest.TestCase ): _SCREAMING_SNAKE_CASE : str = ConsistencyModelPipeline _SCREAMING_SNAKE_CASE : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _SCREAMING_SNAKE_CASE : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _SCREAMING_SNAKE_CASE : str = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def __snake_case ( self :Dict ) ->Any: lowercase : Optional[Any] = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" , subfolder="""test_unet""" , ) return unet @property def __snake_case ( self :List[Any] ) ->Dict: lowercase : Union[str, Any] = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , ) return unet def __snake_case ( self :Optional[Any] , __magic_name__ :int=False ) ->Union[str, Any]: if class_cond: lowercase : str = self.dummy_cond_unet else: lowercase : Tuple = self.dummy_uncond_unet # Default to CM multistep sampler lowercase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowercase : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, } return components def __snake_case ( self :int , __magic_name__ :int , __magic_name__ :List[str]=0 ) ->int: if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ): lowercase : str = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: lowercase : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) lowercase : Optional[Any] = { """batch_size""": 1, """num_inference_steps""": None, """timesteps""": [22, 0], """generator""": generator, """output_type""": """np""", } return inputs def __snake_case ( self :List[Any] ) ->List[Any]: lowercase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase : Dict = self.get_dummy_components() lowercase : int = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE ) lowercase : int = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 32, 32, 3) lowercase : Optional[int] = image[0, -3:, -3:, -1] lowercase : Union[str, Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __snake_case ( self :str ) ->str: lowercase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase : str = self.get_dummy_components(class_cond=_SCREAMING_SNAKE_CASE ) lowercase : Optional[Any] = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = 
pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) lowercase : Dict = 0 lowercase : Union[str, Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 32, 32, 3) lowercase : Dict = image[0, -3:, -3:, -1] lowercase : Any = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __snake_case ( self :List[str] ) ->Optional[int]: lowercase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase : List[str] = self.get_dummy_components() lowercase : Tuple = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE ) lowercase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) lowercase : Optional[Any] = 1 lowercase : List[str] = None lowercase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 32, 32, 3) lowercase : Dict = image[0, -3:, -3:, -1] lowercase : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __snake_case ( self :str ) ->Optional[Any]: lowercase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase : List[Any] = self.get_dummy_components(class_cond=_SCREAMING_SNAKE_CASE ) lowercase : Any = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE ) lowercase : Any = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = 1 lowercase : List[str] = None lowercase : List[Any] = 0 lowercase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 32, 32, 3) lowercase : Optional[Any] = image[0, -3:, -3:, -1] lowercase : str = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class UpperCamelCase (unittest.TestCase ): def __snake_case ( self :Any ) ->str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self :List[Any] , __magic_name__ :List[str]=0 , __magic_name__ :Dict=False , __magic_name__ :Tuple="cpu" , __magic_name__ :Optional[Any]=torch.floataa , __magic_name__ :str=(1, 3, 64, 64) ) ->Tuple: lowercase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) lowercase : List[Any] = { """num_inference_steps""": None, """timesteps""": [22, 0], """class_labels""": 0, """generator""": generator, """output_type""": """np""", } if get_fixed_latents: lowercase : str = self.get_fixed_latents(seed=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , shape=_SCREAMING_SNAKE_CASE ) lowercase : Any = latents return inputs def __snake_case ( self :int , __magic_name__ :List[Any]=0 , __magic_name__ :Tuple="cpu" , __magic_name__ :Any=torch.floataa , __magic_name__ :Dict=(1, 3, 64, 64) ) ->Optional[int]: if type(_SCREAMING_SNAKE_CASE ) == str: lowercase : Union[str, Any] = torch.device(_SCREAMING_SNAKE_CASE ) lowercase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) lowercase : Any = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , 
device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) return latents def __snake_case ( self :Optional[int] ) ->Any: lowercase : Any = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) lowercase : Union[str, Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowercase : Optional[int] = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) pipe.to(torch_device=_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Optional[int] = self.get_inputs() lowercase : Union[str, Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 64, 64, 3) lowercase : str = image[0, -3:, -3:, -1] lowercase : Tuple = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self :str ) ->str: lowercase : Union[str, Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) lowercase : str = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowercase : Union[str, Any] = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) pipe.to(torch_device=_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Tuple = self.get_inputs() lowercase : Tuple = 1 lowercase : Any = None lowercase : Tuple = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 64, 64, 3) lowercase : str = image[0, -3:, -3:, -1] lowercase : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def __snake_case ( self :Dict ) ->Any: lowercase : List[str] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) lowercase : Union[str, Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowercase : List[str] = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) pipe.to(torch_device=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Optional[int] = self.get_inputs(get_fixed_latents=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_SCREAMING_SNAKE_CASE , enable_math=_SCREAMING_SNAKE_CASE , enable_mem_efficient=_SCREAMING_SNAKE_CASE ): lowercase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 64, 64, 3) lowercase : str = image[0, -3:, -3:, -1] lowercase : str = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def __snake_case ( self :str ) ->List[Any]: lowercase : Union[str, Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" ) lowercase : Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) lowercase : Dict = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE ) pipe.to(torch_device=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa ) 
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) lowercase : Tuple = self.get_inputs(get_fixed_latents=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) lowercase : Optional[int] = 1 lowercase : Dict = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_SCREAMING_SNAKE_CASE , enable_math=_SCREAMING_SNAKE_CASE , enable_mem_efficient=_SCREAMING_SNAKE_CASE ): lowercase : Any = pipe(**_SCREAMING_SNAKE_CASE ).images assert image.shape == (1, 64, 64, 3) lowercase : Any = image[0, -3:, -3:, -1] lowercase : Optional[Any] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
709
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _lowerCAmelCase = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class UpperCamelCase (unittest.TestCase ): def __snake_case ( self :str , __magic_name__ :Path , __magic_name__ :Union[str, None] = None , __magic_name__ :Union[List[str], None] = None , __magic_name__ :Union[str, List[str], None] = None , __magic_name__ :bool = True , ) ->Optional[Any]: lowercase : Dict = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )] if identifier is not None: lowercase : Tuple = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__magic_name__ , __magic_name__ ): for n_ in n_identifier: lowercase : List[str] = [file for file in files if n_ not in file] else: lowercase : str = [file for file in files if n_identifier not in file] lowercase : List[str] = ignore_files or [] ignore_files.append("""__init__.py""" ) lowercase : Tuple = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __magic_name__ ) if only_modules: lowercase : List[Any] = file.split(""".""" )[0] try: lowercase : Dict = getattr(__magic_name__ , __magic_name__ ) lowercase : Dict = doctest.DocTestSuite(__magic_name__ ) lowercase : Optional[int] = unittest.TextTestRunner().run(__magic_name__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f"""{module_identifier} is not a module.""" ) else: lowercase : List[str] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def __snake_case ( self :Optional[Any] ) ->Dict: lowercase : int = Path("""src/transformers""" ) lowercase : Tuple = """modeling""" lowercase : List[str] = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ ) def __snake_case ( self :str ) ->str: lowercase : Optional[int] = Path("""src/transformers""" ) lowercase : Tuple = """tokenization""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def __snake_case ( self :Optional[int] ) ->str: lowercase : Tuple = Path("""src/transformers""" ) lowercase : List[Any] = """configuration""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def __snake_case ( self :Tuple ) ->Any: lowercase : str = Path("""src/transformers""" ) lowercase : Optional[int] = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ ) def __snake_case ( self :List[str] ) ->Tuple: lowercase : List[str] = Path("""docs/source""" ) lowercase : int = ["""favicon.ico"""] self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
348
0
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline _a: Dict = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def __lowerCAmelCase ( A , A , A , A , A , A , A , A=False , ): output_path.parent.mkdir(parents=A , exist_ok=A ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( A , A , f=output_path.as_posix() , input_names=A , output_names=A , dynamic_axes=A , do_constant_folding=A , use_external_data_format=A , enable_onnx_checker=A , opset_version=A , ) else: export( A , A , f=output_path.as_posix() , input_names=A , output_names=A , dynamic_axes=A , do_constant_folding=A , opset_version=A , ) @torch.no_grad() def __lowerCAmelCase ( A , A , A , A = False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCAmelCase_ = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: UpperCAmelCase_ = "cpu" UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(A , torch_dtype=A ).to(A ) UpperCAmelCase_ = Path(A ) # TEXT ENCODER UpperCAmelCase_ = pipeline.text_encoder.config.max_position_embeddings UpperCAmelCase_ = pipeline.text_encoder.config.hidden_size UpperCAmelCase_ = pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=A , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=A , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=A , ) del pipeline.text_encoder # UNET UpperCAmelCase_ = pipeline.unet.config.in_channels UpperCAmelCase_ = pipeline.unet.config.sample_size UpperCAmelCase_ = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet , model_args=( torch.randn(2 , A , A , A ).to(device=A , dtype=A ), torch.randn(2 ).to(device=A , dtype=A ), torch.randn(2 , A , A ).to(device=A , dtype=A ), False, ) , output_path=A , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=A , use_external_data_format=A , ) UpperCAmelCase_ = str(unet_path.absolute().as_posix() ) UpperCAmelCase_ = os.path.dirname(A ) UpperCAmelCase_ = onnx.load(A ) # clean up existing tensor files shutil.rmtree(A ) os.mkdir(A ) # collate external tensor files into one onnx.save_model( A , A , save_as_external_data=A , all_tensors_to_one_file=A , location="weights.pb" , convert_attribute=A , ) del pipeline.unet # VAE ENCODER UpperCAmelCase_ = pipeline.vae UpperCAmelCase_ = vae_encoder.config.in_channels UpperCAmelCase_ = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder UpperCAmelCase_ = lambda A , A : vae_encoder.encode(A , A )[0].sample() onnx_export( A , model_args=( torch.randn(1 , A , A , A ).to(device=A , dtype=A ), False, ) , 
output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=A , ) # VAE DECODER UpperCAmelCase_ = pipeline.vae UpperCAmelCase_ = vae_decoder.config.latent_channels UpperCAmelCase_ = vae_decoder.config.out_channels # forward only through the decoder part UpperCAmelCase_ = vae_encoder.decode onnx_export( A , model_args=( torch.randn(1 , A , A , A ).to(device=A , dtype=A ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=A , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: UpperCAmelCase_ = pipeline.safety_checker UpperCAmelCase_ = safety_checker.config.vision_config.num_channels UpperCAmelCase_ = safety_checker.config.vision_config.image_size UpperCAmelCase_ = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , A , A , A , ).to(device=A , dtype=A ), torch.randn(1 , A , A , A ).to(device=A , dtype=A ), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=A , ) del pipeline.safety_checker UpperCAmelCase_ = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" ) UpperCAmelCase_ = pipeline.feature_extractor else: UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=A , feature_extractor=A , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(A ) print("ONNX pipeline saved to" , A ) del pipeline del onnx_pipeline UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(A , provider="CPUExecutionProvider" ) print("ONNX pipeline is loadable" ) if __name__ == "__main__": _a: Any = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") _a: Any = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
162
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument _a: List[Any] = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def __lowerCAmelCase ( A ): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model UpperCAmelCase_ = list(s_dict.keys() ) for key in keys: UpperCAmelCase_ = r".*/layers_(\d+)" UpperCAmelCase_ = key if re.match(A , A ): UpperCAmelCase_ = re.sub(r"layers_(\d+)" , r"block/\1/layer" , A ) UpperCAmelCase_ = r"(encoder|decoder)\/" if re.match(A , A ): UpperCAmelCase_ = re.match(A , A ).groups() if groups[0] == "encoder": UpperCAmelCase_ = re.sub(r"/mlp/" , r"/1/mlp/" , A ) UpperCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , A ) elif groups[0] == "decoder": UpperCAmelCase_ = re.sub(r"/mlp/" , r"/2/mlp/" , A ) UpperCAmelCase_ = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , A ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCAmelCase_ = new_key.replace(A , A ) print(F"{key} -> {new_key}" ) UpperCAmelCase_ = s_dict.pop(A ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase_ = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase_ = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCAmelCase_ = s_dict[key].shape[0] UpperCAmelCase_ = s_dict[key] for idx in range(A ): UpperCAmelCase_ = expert_weihts[idx] print(F"{key} -> {key.replace('expert/' , 'nested fstring' )}" ) s_dict.pop(A ) return s_dict _a: Any = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def __lowerCAmelCase ( A , A ): # Convert a google style config to the hugging face fromat import regex as re with open(A , "r" ) as f: UpperCAmelCase_ = f.read() UpperCAmelCase_ = re.findall(r"(.*) = ([0-9.]*)" , A ) UpperCAmelCase_ = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCAmelCase_ = float(A ) if "." in value else int(A ) UpperCAmelCase_ = re.findall(r"(.*activations) = \(\'(.*)\',\)" , A )[0] UpperCAmelCase_ = str(activation[1] ) UpperCAmelCase_ = num_experts UpperCAmelCase_ = SwitchTransformersConfig(**A ) return config def __lowerCAmelCase ( A , A , A=None , A="./" , A=8 ): # Initialise PyTorch model print(F"Loading flax weights from : {flax_checkpoint_path}" ) UpperCAmelCase_ = checkpoints.load_tax_checkpoint(A ) if gin_file is not None: UpperCAmelCase_ = convert_gin_to_config(A , A ) else: UpperCAmelCase_ = SwitchTransformersConfig.from_pretrained(A ) UpperCAmelCase_ = SwitchTransformersForConditionalGeneration(A ) UpperCAmelCase_ = flax_params["target"] UpperCAmelCase_ = flatten_dict(A , sep="/" ) UpperCAmelCase_ = rename_keys(A ) UpperCAmelCase_ = unflatten_dict(A , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(A , A ) print(F"Save PyTorch model to {pytorch_dump_path}" ) pt_model.save_pretrained(A ) if __name__ == "__main__": _a: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") _a: List[Any] = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
162
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class lowerCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = "ZinengTang/tvlt-base" lowercase__ = tempfile.mkdtemp() def UpperCAmelCase ( self :int , **_lowercase :Optional[int] ): '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **_lowercase ) def UpperCAmelCase ( self :Optional[int] , **_lowercase :Optional[int] ): '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_lowercase ) def UpperCAmelCase ( self :Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_feature_extractor() lowercase__ = TvltProcessor(image_processor=_lowercase , feature_extractor=_lowercase ) processor.save_pretrained(self.tmpdirname ) lowercase__ = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , _lowercase ) self.assertIsInstance(processor.image_processor , _lowercase ) def UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_feature_extractor() lowercase__ = TvltProcessor(image_processor=_lowercase , feature_extractor=_lowercase ) lowercase__ = np.ones([1_20_00] ) lowercase__ = feature_extractor(_lowercase , return_tensors="np" ) lowercase__ = processor(audio=_lowercase , return_tensors="np" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_feature_extractor() lowercase__ = TvltProcessor(image_processor=_lowercase , feature_extractor=_lowercase ) lowercase__ = np.ones([3, 2_24, 2_24] ) lowercase__ = image_processor(_lowercase , return_tensors="np" ) lowercase__ = processor(images=_lowercase , return_tensors="np" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase ( self :Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_feature_extractor() lowercase__ = TvltProcessor(image_processor=_lowercase , feature_extractor=_lowercase ) lowercase__ = np.ones([1_20_00] ) lowercase__ = np.ones([3, 2_24, 2_24] ) lowercase__ = processor(audio=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] ) # test if it raises when no input is passed with pytest.raises(_lowercase ): processor() def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_feature_extractor() lowercase__ = TvltProcessor(image_processor=_lowercase , feature_extractor=_lowercase ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
611
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): __lowerCamelCase = CycleDiffusionPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'negative_prompt', 'height', 'width', 'negative_prompt_embeds', } __lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'} __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} ) __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowercase__ = CLIPTextModel(_lowercase ) lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase ( self :Optional[Any] , _lowercase :List[Any] , _lowercase :Union[str, Any]=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowercase__ = image / 2 + 0.5 if str(_lowercase ).startswith("mps" ): lowercase__ = torch.manual_seed(_lowercase ) else: lowercase__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowercase__ = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = CycleDiffusionPipeline(**_lowercase ) lowercase__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ 
= self.get_dummy_inputs(_lowercase ) lowercase__ = pipe(**_lowercase ) lowercase__ = output.images lowercase__ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) lowercase__ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def UpperCAmelCase ( self :Tuple ): '''simple docstring''' lowercase__ = self.get_dummy_components() for name, module in components.items(): if hasattr(_lowercase , "half" ): lowercase__ = module.half() lowercase__ = CycleDiffusionPipeline(**_lowercase ) lowercase__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = self.get_dummy_inputs(_lowercase ) lowercase__ = pipe(**_lowercase ) lowercase__ = output.images lowercase__ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) lowercase__ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def UpperCAmelCase ( self :Tuple ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class lowerCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self :str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) lowercase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) lowercase__ = init_image.resize((5_12, 5_12) ) lowercase__ = "CompVis/stable-diffusion-v1-4" lowercase__ = DDIMScheduler.from_pretrained(_lowercase , subfolder="scheduler" ) lowercase__ = CycleDiffusionPipeline.from_pretrained( _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = "A black colored car" lowercase__ = "A blue colored car" lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type="np" , ) lowercase__ = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' lowercase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) lowercase__ = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) lowercase__ = init_image.resize((5_12, 5_12) ) lowercase__ = "CompVis/stable-diffusion-v1-4" lowercase__ = DDIMScheduler.from_pretrained(_lowercase , subfolder="scheduler" ) lowercase__ = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = "A black colored car" lowercase__ = "A blue colored car" lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type="np" , ) lowercase__ = output.images assert np.abs(image - expected_image ).max() < 2e-2
611
1
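The slow test above drives CycleDiffusion end to end. For reference, a minimal standalone sketch of that flow: the checkpoint id, image URL, prompts, and sampler arguments are taken from the test itself, while the device choice and variable names are illustrative assumptions.

# Sketch of the CycleDiffusion flow exercised by the slow test above.
# Checkpoint, image URL, prompts and sampler arguments come from the test;
# the device and variable names are assumptions.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

output = pipe(
    prompt="A blue colored car",          # target prompt
    source_prompt="A black colored car",  # prompt describing the input image
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
)
image = output.images[0]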
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Tarjan's algorithm for finding strongly connected components."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
257
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Any = None SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE : Union[str, Any] = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } SCREAMING_SNAKE_CASE : str = { "facebook/mbart-large-en-ro": 1_024, "facebook/mbart-large-cc25": 1_024, } # fmt: off SCREAMING_SNAKE_CASE : Optional[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class UpperCamelCase ( lowercase__ ): '''simple docstring''' lowercase : List[str] =VOCAB_FILES_NAMES lowercase : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP lowercase : Union[str, Any] =["""input_ids""", """attention_mask"""] lowercase : Optional[int] =MBartTokenizer lowercase : List[int] =[] lowercase : List[int] =[] def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ): # Mask token behave like a normal word, i.e. include the space before it lowercase_ :Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) lowercase_ :Optional[int] = vocab_file lowercase_ :Any = False if not self.vocab_file else True lowercase_ :int = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) lowercase_ :Optional[int] = { lang_code: self.convert_tokens_to_ids(UpperCamelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowercase_ :Dict = src_lang if src_lang is not None else '''en_XX''' lowercase_ :Any = self.convert_tokens_to_ids(self._src_lang ) lowercase_ :Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase ( self ): return self._src_lang @src_lang.setter def UpperCamelCase ( self , UpperCamelCase_ ): lowercase_ :Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ): lowercase_ :Optional[Any] = [self.sep_token_id] lowercase_ :Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowercase_ :str = src_lang lowercase_ :List[Any] = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) lowercase_ :Union[str, Any] = self.convert_tokens_to_ids(UpperCamelCase_ ) lowercase_ :Any = tgt_lang_id return inputs def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "en_XX" , UpperCamelCase_ = None , UpperCamelCase_ = "ro_RO" , **UpperCamelCase_ , ): lowercase_ :List[str] = src_lang lowercase_ :Any = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def UpperCamelCase ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase ( self , UpperCamelCase_ ): lowercase_ :Union[str, Any] = self.convert_tokens_to_ids(UpperCamelCase_ ) lowercase_ :Tuple = [] lowercase_ :Tuple = [self.eos_token_id, self.cur_lang_code] lowercase_ :Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase_ :int = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase_ :Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCamelCase ( self , UpperCamelCase_ ): lowercase_ :List[str] = self.convert_tokens_to_ids(UpperCamelCase_ ) lowercase_ :Union[str, Any] = [] lowercase_ :Union[str, Any] = [self.eos_token_id, self.cur_lang_code] lowercase_ :Dict = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase_ :List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase_ :int = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + 
suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory." ) return lowercase_ :Dict = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
257
1
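The tokenizer file above wires MBart's fixed language codes into the fast tokenizer's post-processor. A small usage sketch of that mechanism, assuming the public MBartTokenizerFast class and the facebook/mbart-large-en-ro checkpoint listed in the file's own pretrained map; the sample sentence is made up.

# Sketch of the src/tgt language handling the tokenizer above implements.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
enc = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# set_src_lang_special_tokens arranges suffix tokens as [eos, src_lang_code],
# so the encoded sequence ends with </s> followed by the en_XX language id.
assert enc["input_ids"][0, -1].item() == tok.convert_tokens_to_ids("en_XX")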
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def lowerCamelCase__ ( _A , _A , _A , _A=None , _A=None , _A=None , _A=None , _A=None , ): '''simple docstring''' if attention_mask is None: snake_case_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: snake_case_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: snake_case_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_A ) if decoder_head_mask is None: snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_A ) if cross_attn_head_mask is None: snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_A ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class UpperCAmelCase : '''simple docstring''' def __init__( self : Dict , __lowercase : str , __lowercase : Optional[int]=13 , __lowercase : Any=7 , __lowercase : Any=True , __lowercase : Any=False , __lowercase : int=99 , __lowercase : int=16 , __lowercase : int=2 , __lowercase : str=4 , __lowercase : List[Any]=4 , __lowercase : Any="relu" , __lowercase : Tuple=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Any=0.0 , __lowercase : int=0.0 , __lowercase : Union[str, Any]=20 , __lowercase : str=2 , __lowercase : List[Any]=1 , __lowercase : Union[str, Any]=0 , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = max_position_embeddings snake_case_ = eos_token_id snake_case_ = pad_token_id snake_case_ = bos_token_id def snake_case__ ( self : List[Any] ): """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = self.eos_token_id # Eos Token snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input snake_case_ = 
input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = self.get_config() snake_case_ = prepare_mam_aaa_inputs_dict(__lowercase , __lowercase , __lowercase ) return config, inputs_dict def snake_case__ ( self : int ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def snake_case__ ( self : Any ): """simple docstring""" snake_case_ , snake_case_ = self.prepare_config_and_inputs() return config, inputs_dict def snake_case__ ( self : Any , __lowercase : List[Any] , __lowercase : int ): """simple docstring""" snake_case_ = MaMaaaModel(config=__lowercase ).get_decoder().to(__lowercase ).eval() snake_case_ = inputs_dict["input_ids"] snake_case_ = inputs_dict["attention_mask"] snake_case_ = inputs_dict["head_mask"] # first forward pass snake_case_ = model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase ) snake_case_ , snake_case_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) snake_case_ = model(__lowercase , attention_mask=__lowercase )["last_hidden_state"] snake_case_ = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[ "last_hidden_state" ] # select random slice snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-2 ) ) def snake_case__ ( self : List[Any] , __lowercase : Tuple , __lowercase : Tuple ): """simple docstring""" snake_case_ = MaMaaaModel(config=__lowercase ).to(__lowercase ).eval() snake_case_ = model(**__lowercase ) snake_case_ = outputs.encoder_last_hidden_state snake_case_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = model.get_encoder() encoder.save_pretrained(__lowercase ) snake_case_ = MaMaaaEncoder.from_pretrained(__lowercase ).to(__lowercase ) snake_case_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = model.get_decoder() decoder.save_pretrained(__lowercase ) snake_case_ = MaMaaaDecoder.from_pretrained(__lowercase ).to(__lowercase ) snake_case_ = decoder( input_ids=inputs_dict["decoder_input_ids"] , 
attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__lowercase , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowerCAmelCase_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowerCAmelCase_ = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False def snake_case__ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : int , __lowercase : str , __lowercase : Any ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def snake_case__ ( self : Optional[int] ): """simple docstring""" snake_case_ = MaMaaaModelTester(self ) snake_case_ = ConfigTester(self , config_class=__lowercase ) def snake_case__ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ = model_class(__lowercase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) snake_case_ , snake_case_ = model_class.from_pretrained(__lowercase , output_loading_info=__lowercase ) self.assertEqual(info["missing_keys"] , [] ) def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowercase ) def snake_case__ ( self : Dict ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__lowercase ) def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): snake_case_ = model_class(__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = copy.deepcopy(self._prepare_for_class(__lowercase , __lowercase ) ) if not self.is_encoder_decoder: snake_case_ = inputs["input_ids"] del inputs["input_ids"] else: snake_case_ = inputs["input_ids"] snake_case_ = inputs.get("decoder_input_ids" , __lowercase ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , __lowercase ) snake_case_ = model.get_input_embeddings() if not self.is_encoder_decoder: snake_case_ = wte(__lowercase ) else: snake_case_ = wte(__lowercase ) snake_case_ = wte(__lowercase ) with torch.no_grad(): model(**__lowercase )[0] def snake_case__ ( self : int ): """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = 
input_dict["input_ids"] snake_case_ = input_ids.ne(1 ).to(__lowercase ) snake_case_ = MaMaaaForConditionalGeneration(__lowercase ).eval().to(__lowercase ) if torch_device == "cuda": model.half() model.generate(__lowercase , attention_mask=__lowercase ) model.generate(num_beams=4 , do_sample=__lowercase , early_stopping=__lowercase , num_return_sequences=3 ) def lowerCamelCase__ ( _A ): '''simple docstring''' return torch.tensor(_A , dtype=torch.long , device=_A ) lowercase__ : Optional[Any] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case__ ( self : Optional[Any] ): """simple docstring""" return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__lowercase ) snake_case_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) snake_case_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) snake_case_ = prepare_mam_aaa_inputs_dict(model.config , __lowercase , __lowercase ) with torch.no_grad(): snake_case_ = model(**__lowercase )[0] snake_case_ = torch.Size((1, 11, 10_24) ) self.assertEqual(output.shape , __lowercase ) # change to expected output here snake_case_ = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__lowercase ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=__lowercase ) ) def snake_case__ ( self : List[Any] ): """simple docstring""" snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__lowercase ) # change to intended input snake_case_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) snake_case_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) snake_case_ = prepare_mam_aaa_inputs_dict(model.config , __lowercase , __lowercase ) with torch.no_grad(): snake_case_ = model(**__lowercase )[0] snake_case_ = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __lowercase ) # change to expected output here snake_case_ = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__lowercase ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=__lowercase ) ) def snake_case__ ( self : Any ): """simple docstring""" snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__lowercase ) snake_case_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) snake_case_ = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams snake_case_ = tokenizer(__lowercase , padding=__lowercase , return_tensors="pt" ) snake_case_ = model.generate( input_ids=dct["input_ids"].to(__lowercase ) , 
attention_mask=dct["attention_mask"].to(__lowercase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) snake_case_ = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] snake_case_ = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__lowercase , skip_special_tokens=__lowercase ) assert generated == expected_en
139
lowercase__ : Optional[int] = range(2, 20 + 1) lowercase__ : List[str] = [10**k for k in range(ks[-1] + 1)] lowercase__ : dict[int, dict[int, list[list[int]]]] = {} def lowerCamelCase__ ( _A , _A , _A , _A ): '''simple docstring''' snake_case_ = sum(a_i[j] for j in range(_A , len(_A ) ) ) snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(_A ) , _A ) ) ) snake_case_ , snake_case_ = 0, 0 snake_case_ = n - i snake_case_ = memo.get(_A ) if sub_memo is not None: snake_case_ = sub_memo.get(_A ) if jumps is not None and len(_A ) > 0: # find and make the largest jump without going over snake_case_ = -1 for _k in range(len(_A ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: snake_case_ = _k break if max_jump >= 0: snake_case_ , snake_case_ , snake_case_ = jumps[max_jump] # since the difference between jumps is cached, add c snake_case_ = diff + c for j in range(min(_A , len(_A ) ) ): snake_case_ , snake_case_ = divmod(_A , 10 ) if new_c > 0: add(_A , _A , _A ) else: snake_case_ = [] else: snake_case_ = {c: []} snake_case_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps snake_case_ , snake_case_ = next_term(_A , k - 1 , i + dn , _A ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead snake_case_ , snake_case_ = compute(_A , _A , i + dn , _A ) diff += _diff dn += terms_jumped snake_case_ = sub_memo[c] # keep jumps sorted by # of terms skipped snake_case_ = 0 while j < len(_A ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(_A , (diff, dn, k) ) return (diff, dn) def lowerCamelCase__ ( _A , _A , _A , _A ): '''simple docstring''' if i >= n: return 0, i if k > len(_A ): a_i.extend([0 for _ in range(k - len(_A ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) snake_case_ = i snake_case_ , snake_case_ , snake_case_ = 0, 0, 0 for j in range(len(_A ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 snake_case_ = ds_c + ds_b diff += addend snake_case_ = 0 for j in range(_A ): snake_case_ = a_i[j] + addend snake_case_ , snake_case_ = divmod(_A , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(_A , _A , _A ) return diff, i - start_i def lowerCamelCase__ ( _A , _A , _A ): '''simple docstring''' for j in range(_A , len(_A ) ): snake_case_ = digits[j] + addend if s >= 10: snake_case_ , snake_case_ = divmod(_A , 10 ) snake_case_ = addend // 10 + quotient else: snake_case_ = s snake_case_ = addend // 10 if addend == 0: break while addend > 0: snake_case_ , snake_case_ = divmod(_A , 10 ) digits.append(_A ) def lowerCamelCase__ ( _A = 10**15 ): '''simple docstring''' snake_case_ = [1] snake_case_ = 1 snake_case_ = 0 while True: snake_case_ , snake_case_ = next_term(_A , 20 , i + dn , _A ) dn += terms_jumped if dn == n - i: break snake_case_ = 0 for j in range(len(_A ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'''{solution() = }''')
139
1
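The integration tests above cover the M2M100 translation model (the mangled `MaMaaa*` names). A minimal sketch of the fr-to-en generation path they check: the checkpoint, beam count, forced BOS handling, and expected output are taken from the test itself; everything else is assumed.

# Sketch of the fr->en generation path the slow integration test verifies.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained(
    "facebook/m2m100_418M", src_lang="fr", tgt_lang="en"
)

src = ["L'affaire NSA souligne l'absence totale de débat sur le renseignement"]
batch = tokenizer(src, padding=True, return_tensors="pt")
generated = model.generate(
    **batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en")
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# -> ["The NSA case highlights the total absence of intelligence debate"]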
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
366
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Tuple ) -> Optional[Any]: '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ (lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read() _check_parquet_dataset(lowercase__ , lowercase__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> Any: '''simple docstring''' lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ = features.copy() if features else default_expected_features lowerCAmelCase__ = ( Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read() _check_parquet_dataset(lowercase__ , lowercase__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , split=lowercase__ ).read() _check_parquet_dataset(lowercase__ , lowercase__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Any: '''simple docstring''' if issubclass(lowercase__ , lowercase__ ): lowerCAmelCase__ = parquet_path elif issubclass(lowercase__ , lowercase__ ): lowerCAmelCase__ = [parquet_path] lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read() 
_check_parquet_dataset(lowercase__ , lowercase__ ) def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : str , lowercase__ : Optional[Any]=("train",) ) -> Union[str, Any]: '''simple docstring''' assert isinstance(lowercase__ , lowercase__ ) for split in splits: lowerCAmelCase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read() _check_parquet_datasetdict(lowercase__ , lowercase__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ = features.copy() if features else default_expected_features lowerCAmelCase__ = ( Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ = ParquetDatasetReader({'''train''': parquet_path} , features=lowercase__ , cache_dir=lowercase__ ).read() _check_parquet_datasetdict(lowercase__ , lowercase__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ (lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> int: '''simple docstring''' if split: lowerCAmelCase__ = {split: parquet_path} else: lowerCAmelCase__ = '''train''' lowerCAmelCase__ = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read() _check_parquet_datasetdict(lowercase__ , lowercase__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ = pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ (lowercase__ : Dict , lowercase__ : List[str] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase__ = str(shared_datadir / 
'''test_image_rgb.jpg''' ) lowerCAmelCase__ = {'''image''': [image_path]} lowerCAmelCase__ = Features({'''image''': Image()} ) lowerCAmelCase__ = Dataset.from_dict(lowercase__ , features=lowercase__ ) lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=lowercase__ ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : str ) -> Tuple: '''simple docstring''' assert get_writer_batch_size(lowercase__ ) == expected
668
0
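The parquet tests above assert a write-then-read round trip over a 4-row fixture with columns col_1/col_2/col_3 (string/int64/float64). A sketch of the same round trip through the public Dataset API rather than the internal ParquetDatasetReader/Writer classes; the sample values are assumptions matching that schema.

# Round-trip sketch of what the parquet tests above verify.
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "col_1": ["0", "1", "2", "3"],
        "col_2": [0, 1, 2, 3],
        "col_3": [0.0, 1.0, 2.0, 3.0],
    }
)
ds.to_parquet("foo.parquet")  # returns a positive byte count on success
reloaded = Dataset.from_parquet("foo.parquet")
assert reloaded.num_rows == 4
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.features["col_2"].dtype == "int64"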
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset bundle into features and target labels
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
300
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # write the merged run back into the original list
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) two-way merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
300
1
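A few quick checks for the iter_merge_sort routine above; the inputs are illustrative.

# Sanity checks for iter_merge_sort (sample inputs assumed):
assert iter_merge_sort([4, 1, 3, 9, 7]) == [1, 3, 4, 7, 9]
assert iter_merge_sort([5, 9, 8, 5, 6, 7, 0]) == [0, 5, 5, 6, 7, 8, 9]
assert iter_merge_sort([1]) == [1]
assert iter_merge_sort([]) == []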
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def A_ ( lowercase_ , lowercase_ , lowercase_ ) -> Any: def get_masked_lm_array(lowercase_ ): _snake_case : Optional[int] = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _snake_case : str = tf.train.load_variable(lowercase_ , lowercase_ ) if "kernel" in name: _snake_case : Any = array.transpose() return torch.from_numpy(lowercase_ ) def get_encoder_array(lowercase_ ): _snake_case : List[Any] = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _snake_case : Optional[int] = tf.train.load_variable(lowercase_ , lowercase_ ) if "kernel" in name: _snake_case : List[Any] = array.transpose() return torch.from_numpy(lowercase_ ) def get_encoder_layer_array(lowercase_ , lowercase_ ): _snake_case : Optional[Any] = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _snake_case : List[str] = tf.train.load_variable(lowercase_ , lowercase_ ) if "kernel" in name: _snake_case : List[str] = array.transpose() return torch.from_numpy(lowercase_ ) def get_encoder_attention_layer_array(lowercase_ , lowercase_ , lowercase_ ): _snake_case : int = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' _snake_case : Tuple = tf.train.load_variable(lowercase_ , lowercase_ ) _snake_case : Dict = array.reshape(lowercase_ ) if "kernel" in name: _snake_case : Optional[Any] = array.transpose() return torch.from_numpy(lowercase_ ) print(f'''Loading model based on config from {config_path}...''' ) _snake_case : List[Any] = BertConfig.from_json_file(lowercase_ ) _snake_case : Union[str, Any] = BertForMaskedLM(lowercase_ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): _snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention _snake_case : BertSelfAttention = layer.attention.self _snake_case : str = get_encoder_attention_layer_array( lowercase_ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) _snake_case : str = get_encoder_attention_layer_array( lowercase_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) _snake_case : Any = get_encoder_attention_layer_array( lowercase_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) _snake_case : Optional[Any] = get_encoder_attention_layer_array( lowercase_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) _snake_case : int = get_encoder_attention_layer_array( lowercase_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) _snake_case : List[Any] = get_encoder_attention_layer_array( lowercase_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output _snake_case : BertSelfOutput = layer.attention.output _snake_case : List[str] = get_encoder_attention_layer_array( lowercase_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) _snake_case : Union[str, Any] = get_encoder_attention_layer_array( lowercase_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) _snake_case : Dict = get_encoder_layer_array(lowercase_ , '''_attention_layer_norm/gamma''' ) _snake_case : Union[str, Any] = get_encoder_layer_array(lowercase_ , '''_attention_layer_norm/beta''' ) # Intermediate _snake_case : BertIntermediate = layer.intermediate _snake_case : Any = 
get_encoder_layer_array(lowercase_ , '''_intermediate_dense/kernel''' ) _snake_case : Any = get_encoder_layer_array(lowercase_ , '''_intermediate_dense/bias''' ) # Output _snake_case : BertOutput = layer.output _snake_case : Tuple = get_encoder_layer_array(lowercase_ , '''_output_dense/kernel''' ) _snake_case : Union[str, Any] = get_encoder_layer_array(lowercase_ , '''_output_dense/bias''' ) _snake_case : Any = get_encoder_layer_array(lowercase_ , '''_output_layer_norm/gamma''' ) _snake_case : Tuple = get_encoder_layer_array(lowercase_ , '''_output_layer_norm/beta''' ) # Embeddings _snake_case : str = get_encoder_array('''_position_embedding_layer/embeddings''' ) _snake_case : Tuple = get_encoder_array('''_type_embedding_layer/embeddings''' ) _snake_case : Optional[Any] = get_encoder_array('''_embedding_norm_layer/gamma''' ) _snake_case : Union[str, Any] = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head _snake_case : str = model.cls.predictions.transform _snake_case : int = get_masked_lm_array('''dense/kernel''' ) _snake_case : Optional[Any] = get_masked_lm_array('''dense/bias''' ) _snake_case : Optional[Any] = get_masked_lm_array('''layer_norm/gamma''' ) _snake_case : int = get_masked_lm_array('''layer_norm/beta''' ) _snake_case : List[str] = get_masked_lm_array('''embedding_table''' ) # Pooling _snake_case : int = BertPooler(config=lowercase_ ) _snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) _snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(lowercase_ ) # Integration test - should load without any errors ;) _snake_case : Tuple = BertForMaskedLM.from_pretrained(lowercase_ ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) lowerCAmelCase_ = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
326
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,__UpperCAmelCase ,) class A (__UpperCAmelCase ): _SCREAMING_SNAKE_CASE = RobertaConfig _SCREAMING_SNAKE_CASE = """roberta""" def __init__( self , lowercase_ ) -> Any: '''simple docstring''' super().__init__(lowercase_ ) _snake_case : str = RobertaEmbeddings(lowercase_ ) self.init_weights() @add_start_docstrings( """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. """ ,__UpperCAmelCase ,) class A (__UpperCAmelCase ): _SCREAMING_SNAKE_CASE = RobertaConfig _SCREAMING_SNAKE_CASE = """roberta""" def __init__( self , lowercase_ ) -> str: '''simple docstring''' super().__init__(lowercase_ ) _snake_case : List[str] = config.num_labels _snake_case : List[str] = config.num_hidden_layers _snake_case : Dict = DeeRobertaModel(lowercase_ ) _snake_case : Dict = nn.Dropout(config.hidden_dropout_prob ) _snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(lowercase_ ) def __a ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=-1 , lowercase_=False , ) -> Optional[Any]: '''simple docstring''' _snake_case : Dict = self.num_layers try: _snake_case : Any = self.roberta( lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , ) _snake_case : List[Any] = outputs[1] _snake_case : Optional[Any] = self.dropout(lowercase_ ) _snake_case : Any = self.classifier(lowercase_ ) _snake_case : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _snake_case : Dict = e.message _snake_case : Any = e.exit_layer _snake_case : Tuple = outputs[0] if not self.training: _snake_case : Tuple = entropy(lowercase_ ) _snake_case : str = [] _snake_case : List[Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression _snake_case : List[str] = MSELoss() _snake_case : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _snake_case : Tuple = CrossEntropyLoss() _snake_case : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _snake_case : int = [] for highway_exit in outputs[-1]: _snake_case : List[str] = highway_exit[0] if not self.training: highway_logits_all.append(lowercase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _snake_case : Optional[Any] = MSELoss() _snake_case : List[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _snake_case : Optional[Any] = CrossEntropyLoss() _snake_case : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowercase_ ) if train_highway: _snake_case : Tuple = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course 
else: _snake_case : List[str] = (loss,) + outputs if not self.training: _snake_case : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _snake_case : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
326
1
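The conversion script above transposes every array whose name contains "kernel". That reflects a layout difference, not an arbitrary choice: TensorFlow dense kernels are stored (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features). A tiny self-contained illustration with assumed sizes:

# Why "kernel" arrays are transposed during TF -> PyTorch conversion.
import numpy as np
import torch

tf_kernel = np.random.rand(768, 3072).astype(np.float32)  # TF layout: (in, out)
linear = torch.nn.Linear(768, 3072)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))  # torch layout: (out, in)
assert tuple(linear.weight.shape) == (3072, 768)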
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar("""T""") lowerCAmelCase_ = TypeVar("""U""") class _lowerCAmelCase ( Generic[T, U] ): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase : T | None , UpperCamelCase : U | None ): '''simple docstring''' _snake_case : Optional[Any] = key _snake_case : List[Any] = val _snake_case : DoubleLinkedListNode[T, U] | None = None _snake_case : DoubleLinkedListNode[T, U] | None = None def __repr__( self : Union[str, Any] ): '''simple docstring''' return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class _lowerCAmelCase ( Generic[T, U] ): '''simple docstring''' def __init__( self : str ): '''simple docstring''' _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase ) _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase ) _snake_case : List[str] = self.rear, self.head def __repr__( self : Optional[int] ): '''simple docstring''' _snake_case : List[str] = ['DoubleLinkedList'] _snake_case : Dict = self.head while node.next is not None: rep.append(str(UpperCamelCase ) ) _snake_case : Dict = node.next rep.append(str(self.rear ) ) return ",\n ".join(UpperCamelCase ) def UpperCamelCase_ ( self : Dict , UpperCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' _snake_case : str = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _snake_case : Tuple = node _snake_case : int = previous _snake_case : int = node _snake_case : int = self.rear def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None _snake_case : str = node.next _snake_case : List[str] = node.prev _snake_case : Union[str, Any] = None _snake_case : Optional[int] = None return node class _lowerCAmelCase ( Generic[T, U] ): '''simple docstring''' a_ : dict[Callable[[T], U], LRUCache[T, U]] ={} def __init__( self : List[Any] , UpperCamelCase : int ): '''simple docstring''' _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList() _snake_case : Dict = capacity _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 _snake_case : Optional[Any] = 0 _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self : Dict ): '''simple docstring''' return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self : List[str] , UpperCamelCase : T ): '''simple docstring''' return key in self.cache def UpperCamelCase_ ( self : Any , UpperCamelCase : T ): '''simple docstring''' if key in self.cache: self.hits += 1 _snake_case : DoubleLinkedListNode[T, U] = self.cache[key] _snake_case : Dict = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(UpperCamelCase ) return node.val self.miss += 1 return None def UpperCamelCase_ ( self : int , UpperCamelCase : T , UpperCamelCase : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _snake_case : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not 
None assert first_node.key is not None assert ( self.list.remove(UpperCamelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _snake_case : Any = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _snake_case : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _snake_case : Optional[Any] = value self.list.add(UpperCamelCase ) @classmethod def UpperCamelCase_ ( cls : str , UpperCamelCase : int = 1_28 ): '''simple docstring''' def cache_decorator_inner(UpperCamelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*UpperCamelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: _snake_case : Union[str, Any] = LRUCache(UpperCamelCase ) _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _snake_case : Any = func(*UpperCamelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , UpperCamelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(UpperCamelCase , 'cache_info' , UpperCamelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
719
from __future__ import annotations

from random import random


class Node:
    """Treap node: a binary search tree by value and a heap by priority."""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two: values less than `value`, and the rest."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # The current node becomes the right tree's root; split its left son
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Symmetric to the previous case
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must precede those in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert value by splitting at it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print an inorder traversal (sorted order for a valid treap)."""
    if not root:
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands: '+x' inserts x, '-x' erases all nodes with value x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, print the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
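# Usage sketch (illustrative, assumes the treap functions restored above):
# build a treap, erase a value, and print an inorder traversal, which
# comes out in sorted order.


def _treap_demo() -> None:
    root: Node | None = None
    for value in (5, 3, 9, 1, 7):
        root = insert(root, value)
    root = erase(root, 9)
    inorder(root)  # prints: 1,3,5,7,
    print()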
669
0
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A ( lowerCamelCase__ ): def __init__( self: List[str] , _lowerCAmelCase: Union[str, "sqlalchemy.sql.Selectable"] , _lowerCAmelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _lowerCAmelCase: Optional[Features] = None , _lowerCAmelCase: str = None , _lowerCAmelCase: bool = False , **_lowerCAmelCase: Optional[int] , ) -> str: '''simple docstring''' super().__init__(features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , **lowerCAmelCase__ ) UpperCAmelCase_ =Sql( cache_dir=lowerCAmelCase__ , features=lowerCAmelCase__ , sql=lowerCAmelCase__ , con=lowerCAmelCase__ , **lowerCAmelCase__ , ) def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ =None UpperCAmelCase_ =None UpperCAmelCase_ =None UpperCAmelCase_ =None self.builder.download_and_prepare( download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , ) # Build dataset for splits UpperCAmelCase_ =self.builder.as_dataset( split="train" , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory ) return dataset class A : def __init__( self: Tuple , _lowerCAmelCase: Dataset , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[int] = None , **_lowerCAmelCase: List[str] , ) -> Any: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' 
) UpperCAmelCase_ =dataset UpperCAmelCase_ =name UpperCAmelCase_ =con UpperCAmelCase_ =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE UpperCAmelCase_ =num_proc UpperCAmelCase_ =to_sql_kwargs def lowerCAmelCase__ ( self: List[str] ) -> Dict: '''simple docstring''' UpperCAmelCase_ =self.to_sql_kwargs.pop("sql" , lowerCAmelCase__ ) UpperCAmelCase_ =self.to_sql_kwargs.pop("con" , lowerCAmelCase__ ) UpperCAmelCase_ =self.to_sql_kwargs.pop("index" , lowerCAmelCase__ ) UpperCAmelCase_ =self._write(index=lowerCAmelCase__ , **self.to_sql_kwargs ) return written def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[Any] ) -> str: '''simple docstring''' UpperCAmelCase_ =args UpperCAmelCase_ ={**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs UpperCAmelCase_ =query_table( table=self.dataset.data , key=slice(lowerCAmelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) UpperCAmelCase_ =batch.to_pandas() UpperCAmelCase_ =df.to_sql(self.name , self.con , index=lowerCAmelCase__ , **lowerCAmelCase__ ) return num_rows or len(lowerCAmelCase__ ) def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: str , **_lowerCAmelCase: Union[str, Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: UpperCAmelCase_ =len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCAmelCase__ , lowerCAmelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
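# Usage sketch (illustrative): the reader/writer classes above back the
# public `Dataset.from_sql` / `Dataset.to_sql` helpers in the `datasets`
# library. The table and database names below are placeholders for the
# example.


def _sql_round_trip_demo() -> None:
    from datasets import Dataset

    ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
    ds.to_sql("my_table", "sqlite:///example.db")  # backed by SqlDatasetWriter
    loaded = Dataset.from_sql("my_table", "sqlite:///example.db")  # SqlDatasetReader
    print(loaded.num_rows)  # 3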
54
def circle_sort(collection: list) -> list:
    """Sort `collection` in place with circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare and swap mirrored pairs in collection[low:high + 1]."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
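# Usage sketch (illustrative, assumes circle_sort restored above):


def _circle_sort_demo() -> None:
    print(circle_sort([0, 5, 3, 2, 2]))  # [0, 2, 2, 3, 5]
    print(circle_sort([]))  # []
    print(circle_sort([-2, -5, -45]))  # [-45, -5, -2]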
578
0
# hubconf-style entry points; the function names below follow the torch.hub
# convention used by transformers' hubconf.py.
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
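# Usage sketch (illustrative): files like this follow the torch.hub
# entry-point convention, so the helpers can be loaded remotely. The
# repository name below is the historically documented one and is an
# assumption here:
#
#     import torch
#
#     tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")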
445
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under preemptive SRTF."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
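# Worked example (illustrative, assumes the functions above). Three
# processes arrive at t = 0, 1, 2 with burst times 3, 1, 2; under SRTF,
# P2 preempts P1 at t = 1, giving waiting times [1, 0, 2] and turnaround
# times [4, 1, 4].


def _srtf_demo() -> None:
    demo_arrival = [0, 1, 2]
    demo_burst = [3, 1, 2]
    demo_wait = calculate_waitingtime(demo_arrival, demo_burst, 3)
    demo_tat = calculate_turnaroundtime(demo_burst, 3, demo_wait)
    print(demo_wait, demo_tat)  # [1, 0, 2] [4, 1, 4]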
445
1
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Return the sum of all numbers below n that are palindromic in both
    base 10 and base 2 (Project Euler problem 36)."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
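# Quick check (illustrative): 585 reads the same forwards and backwards in
# base 10 and in base 2 (1001001001), so it is counted; below 1000 the
# double-base palindromes are 1, 3, 5, 7, 9, 33, 99, 313, 585 and 717,
# which sum to 1772.


def _palindrome_demo() -> None:
    print(is_palindrome(585), bin(585)[2:])  # True 1001001001
    print(solution(1000))  # 1772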
109
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : list ): _UpperCAmelCase : List[Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(UpperCamelCase__ )] ) _UpperCAmelCase : Tuple = np.array(UpperCamelCase__ ) _UpperCAmelCase : Any = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , UpperCamelCase__ ) ) , x.transpose() ) , UpperCamelCase__ ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : list ): _UpperCAmelCase : Tuple = (1, 2, 1) _UpperCAmelCase : Tuple = (1, 1, 0, 7) _UpperCAmelCase : Tuple = SARIMAX( UpperCamelCase__ , exog=UpperCamelCase__ , order=UpperCamelCase__ , seasonal_order=UpperCamelCase__ ) _UpperCAmelCase : Any = model.fit(disp=UpperCamelCase__ , maxiter=600 , method='''nm''' ) _UpperCAmelCase : int = model_fit.predict(1 , len(UpperCamelCase__ ) , exog=[test_match] ) return result[0] def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : list ): _UpperCAmelCase : str = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Dict = regressor.predict(UpperCamelCase__ ) return y_pred[0] def lowerCamelCase_ (UpperCamelCase__ : list ): train_user.sort() _UpperCAmelCase : Union[str, Any] = np.percentile(UpperCamelCase__ , 25 ) _UpperCAmelCase : Optional[int] = np.percentile(UpperCamelCase__ , 75 ) _UpperCAmelCase : Dict = qa - qa _UpperCAmelCase : List[str] = qa - (iqr * 0.1) return low_lim def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : float ): _UpperCAmelCase : Dict = 0 _UpperCAmelCase : Dict = 0 for i in list_vote: if i > actual_result: _UpperCAmelCase : str = not_safe + 1 else: if abs(abs(UpperCamelCase__ ) - abs(UpperCamelCase__ ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) _lowerCAmelCase :Any = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]] _lowerCAmelCase :str = pd.DataFrame( data_input, columns=['total_user', 'total_even', 'days'] ) _lowerCAmelCase :Dict = Normalizer().fit_transform(data_input_df.values) # split data _lowerCAmelCase :Optional[Any] = normalize_df[:, 2].tolist() _lowerCAmelCase :Optional[Any] = normalize_df[:, 0].tolist() _lowerCAmelCase :Tuple = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) _lowerCAmelCase :Union[str, Any] = normalize_df[:, [1, 2]].tolist() _lowerCAmelCase :str = x[: len(x) - 1] _lowerCAmelCase :Dict = x[len(x) - 1 :] # for linear regression & sarimax _lowerCAmelCase :Dict = total_date[: len(total_date) - 1] _lowerCAmelCase :List[Any] = total_user[: len(total_user) - 1] _lowerCAmelCase :Dict = total_match[: len(total_match) - 1] _lowerCAmelCase :Optional[Any] = total_date[len(total_date) - 1 :] _lowerCAmelCase :List[str] = total_user[len(total_user) - 1 :] _lowerCAmelCase :str = total_match[len(total_match) - 1 :] # voting system with forecasting _lowerCAmelCase :Union[str, Any] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, 
x_test, trn_user), ] # check the safety of today's data _lowerCAmelCase :Any = '' if data_safety_checker(res_vote, tst_user) else 'not ' print('Today\'s data is {not_str}safe.')
506
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ : List[Any] = logging.get_logger(__name__) UpperCamelCase_ : Union[str, Any] = { '''caidas/swin2sr-classicalsr-x2-64''': ( '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json''' ), } class __lowerCAmelCase ( _lowercase ): """simple docstring""" snake_case = "swin2sr" snake_case = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Optional[Any] , _snake_case : Optional[int]=64 , _snake_case : Tuple=1 , _snake_case : str=3 , _snake_case : Optional[int]=180 , _snake_case : Dict=[6, 6, 6, 6, 6, 6] , _snake_case : List[str]=[6, 6, 6, 6, 6, 6] , _snake_case : List[str]=8 , _snake_case : Optional[Any]=2.0 , _snake_case : Optional[int]=True , _snake_case : Optional[int]=0.0 , _snake_case : Dict=0.0 , _snake_case : Tuple=0.1 , _snake_case : Dict="gelu" , _snake_case : Tuple=False , _snake_case : Tuple=0.0_2 , _snake_case : List[Any]=1e-5 , _snake_case : List[Any]=2 , _snake_case : Tuple=1.0 , _snake_case : Any="1conv" , _snake_case : Union[str, Any]="pixelshuffle" , **_snake_case : int , ) -> Optional[int]: """simple docstring""" super().__init__(**_snake_case ) A_ = image_size A_ = patch_size A_ = num_channels A_ = embed_dim A_ = depths A_ = len(_snake_case ) A_ = num_heads A_ = window_size A_ = mlp_ratio A_ = qkv_bias A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = drop_path_rate A_ = hidden_act A_ = use_absolute_embeddings A_ = layer_norm_eps A_ = initializer_range A_ = upscale A_ = img_range A_ = resi_connection A_ = upsampler
482
"""simple docstring""" def A_ (__a , __a , __a ): '''simple docstring''' A_ = len(__a ) A_ = [[0] * n for i in range(__a )] for i in range(__a ): A_ = y_points[i] for i in range(2 , __a ): for j in range(__a , __a ): A_ = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
482
1
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[int] = "https://openaipublic.azureedge.net/jukebox/models/" lowerCamelCase : str = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def _SCREAMING_SNAKE_CASE ( lowercase : Any ): '''simple docstring''' if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: lowerCamelCase_ = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: lowerCamelCase_ = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: lowerCamelCase_ = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: lowerCamelCase_ = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: lowerCamelCase_ = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: lowerCamelCase_ = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: lowerCamelCase_ = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Any , lowercase : Union[str, Any] , lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = {} import re lowerCamelCase_ = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) lowerCamelCase_ = re.compile( r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) lowerCamelCase_ = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) lowerCamelCase_ = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) lowerCamelCase_ = re.compile( r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) lowerCamelCase_ = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) lowerCamelCase_ = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) lowerCamelCase_ = re.compile( r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) lowerCamelCase_ = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(lowercase ): lowerCamelCase_ = re_encoder_block_conv_in.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}""" lowerCamelCase_ = re_encoder_block_conv_in.sub(lowercase , lowercase ) elif re_encoder_block_resnet.fullmatch(lowercase ): lowerCamelCase_ = re_encoder_block_resnet.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ = {'1': 1, '3': 2}[groups[-2]] lowerCamelCase_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.""" lowerCamelCase_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowerCamelCase_ = prefix + resnet_block lowerCamelCase_ = re_encoder_block_resnet.sub(lowercase , lowercase ) elif re_encoder_block_proj_out.fullmatch(lowercase ): lowerCamelCase_ = re_encoder_block_proj_out.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}""" lowerCamelCase_ = re_encoder_block_proj_out.sub(lowercase , lowercase ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(lowercase ): lowerCamelCase_ = re_decoder_block_conv_out.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}""" lowerCamelCase_ = re_decoder_block_conv_out.sub(lowercase , lowercase ) elif re_decoder_block_resnet.fullmatch(lowercase ): 
lowerCamelCase_ = re_decoder_block_resnet.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ = {'1': 1, '3': 2}[groups[-2]] lowerCamelCase_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.""" lowerCamelCase_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowerCamelCase_ = prefix + resnet_block lowerCamelCase_ = re_decoder_block_resnet.sub(lowercase , lowercase ) elif re_decoder_block_proj_in.fullmatch(lowercase ): lowerCamelCase_ = re_decoder_block_proj_in.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}""" lowerCamelCase_ = re_decoder_block_proj_in.sub(lowercase , lowercase ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(lowercase ): lowerCamelCase_ = re_prior_cond_conv_out.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}""" lowerCamelCase_ = re_prior_cond_conv_out.sub(lowercase , lowercase ) elif re_prior_cond_resnet.fullmatch(lowercase ): lowerCamelCase_ = re_prior_cond_resnet.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ = {'1': 1, '3': 2}[groups[-2]] lowerCamelCase_ = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.""" lowerCamelCase_ = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowerCamelCase_ = prefix + resnet_block lowerCamelCase_ = re_prior_cond_resnet.sub(lowercase , lowercase ) elif re_prior_cond_proj_in.fullmatch(lowercase ): lowerCamelCase_ = re_prior_cond_proj_in.match(lowercase ) lowerCamelCase_ = regex_match.groups() lowerCamelCase_ = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}""" lowerCamelCase_ = re_prior_cond_proj_in.sub(lowercase , lowercase ) # keep original key else: lowerCamelCase_ = original_key lowerCamelCase_ = replace_key(lowercase ) if f"""{key_prefix}.{key}""" not in model_state_dict or key is None: print(f"""failed converting {original_key} to {key}, does not match""" ) # handle missmatched shape elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape: lowerCamelCase_ = model_state_dict[f"""{key_prefix}.{key}"""] print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" ) lowerCamelCase_ = original_key lowerCamelCase_ = original_key lowerCamelCase_ = value return new_dict @torch.no_grad() def _SCREAMING_SNAKE_CASE ( lowercase : List[Any]=None , lowercase : List[str]=None ): '''simple docstring''' for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ): lowerCamelCase_ = requests.get(f"""{PREFIX}{file}""" , allow_redirects=lowercase ) os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=lowercase ) open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , 'wb' ).write(r.content ) lowerCamelCase_ = MODEL_MAPPING[model_name.split('/' )[-1]] lowerCamelCase_ = JukeboxConfig.from_pretrained(lowercase ) lowerCamelCase_ = JukeboxModel(lowercase ) lowerCamelCase_ = [] lowerCamelCase_ = {} for i, dict_name in enumerate(lowercase ): lowerCamelCase_ = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model'] lowerCamelCase_ = {} for k in old_dic.keys(): 
if k.endswith('.b' ): lowerCamelCase_ = old_dic[k] elif k.endswith('.w' ): lowerCamelCase_ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ = old_dic[k] else: lowerCamelCase_ = old_dic[k] lowerCamelCase_ = 'vqvae' if i == 0 else f"""priors.{3 - i}""" lowerCamelCase_ = fix_jukebox_keys(lowercase , model.state_dict() , lowercase , lowercase ) weight_dict.append(lowercase ) lowerCamelCase_ = weight_dict.pop(0 ) model.vqvae.load_state_dict(lowercase ) for i in range(len(lowercase ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(lowercase ).mkdir(exist_ok=lowercase ) with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile: json.dump(lowercase , lowercase ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase ) return weight_dict if __name__ == "__main__": lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jukebox-5b-lyrics", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="jukebox-5b-lyrics-converted", type=str, help="Path to the output PyTorch model directory.", ) lowerCamelCase : List[str] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
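# Illustration (assumption: the first helper above is the checkpoint
# key-renaming function, `replace_key` in the original conversion script,
# and the sample key below is made up for the example). It maps OpenAI
# Jukebox state-dict names onto the Transformers layout, e.g. a VQ-VAE
# codebook entry:
#
#     replace_key("vqvae.bottleneck.level_blocks.0.k")
#     # -> "vqvae.bottleneck.level_blocks.0.codebook"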
70
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = BertTokenizer UpperCamelCase = BertTokenizerFast UpperCamelCase = True UpperCamelCase = True UpperCamelCase = filter_non_english def a__ ( self : List[Any] ) -> int: """simple docstring""" super().setUp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def a__ ( self : Tuple , A_ : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = 'unwanted, running' return input_text, output_text def a__ ( self : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" if not self.test_rust_tokenizer: return lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # With lower casing lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a__ ( self : Optional[int] ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Tuple ) -> str: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : int ) -> List[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : str ) -> List[str]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Dict ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : int ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a__ ( self : List[Any] ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer() lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.' 
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCamelCase_ = {} for i, token in enumerate(A_ ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a__ ( self : List[Any] ) -> int: """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def a__ ( self : Any ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' ) lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def a__ ( self : str ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCamelCase_ = tokenizer_r.encode_plus( A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , ) lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False lowerCamelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case 
else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['的', '人', '有'] lowerCamelCase_ = ''.join(A_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = True lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = False lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that only the first Chinese character is not preceded by "##". lowerCamelCase_ = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ ) ] self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ )
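# Usage sketch (illustrative): the behaviour exercised by the lower-casing
# tests above, run directly against the public BasicTokenizer.


def _basic_tokenizer_demo() -> None:
    from transformers.models.bert.tokenization_bert import BasicTokenizer

    tokenizer = BasicTokenizer(do_lower_case=True)
    print(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "))
    # ['hello', '!', 'how', 'are', 'you', '?']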
70
1
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize(
    "input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20]
)
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(
            datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size
        )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
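# Usage sketch (illustrative): `is_small_dataset` compares a byte size
# against the configured in-memory threshold. With the default
# IN_MEMORY_MAX_SIZE of 0 it is always False, so the threshold is patched
# here for the demonstration.


def _is_small_dataset_demo() -> None:
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB threshold
    print(is_small_dataset(400 * 2**20))  # True: 400 MiB < 500 MiB
    print(is_small_dataset(600 * 2**20))  # False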
705
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a__ : List[str] = logging.get_logger(__name__) a__ : List[Any] = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ): '''simple docstring''' A : List[str] = "focalnet" def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = embed_dim lowercase__ = use_conv_embed lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = focal_levels lowercase__ = focal_windows lowercase__ = hidden_act lowercase__ = mlp_ratio lowercase__ = hidden_dropout_prob lowercase__ = drop_path_rate lowercase__ = use_layerscale lowercase__ = layerscale_value lowercase__ = use_post_layernorm lowercase__ = use_post_layernorm_in_modulation lowercase__ = normalize_modulator lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = encoder_stride lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)] lowercase__, lowercase__ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
642
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = ("DownEncoderBlock2D",) , _UpperCAmelCase = ("UpDecoderBlock2D",) , _UpperCAmelCase = (64,) , _UpperCAmelCase = 1 , _UpperCAmelCase = "silu" , _UpperCAmelCase = 3 , _UpperCAmelCase = 32 , _UpperCAmelCase = 256 , _UpperCAmelCase = 32 , _UpperCAmelCase = None , _UpperCAmelCase = 0.18215 , _UpperCAmelCase = "group" , ): '''simple docstring''' super().__init__() # pass init params to Encoder __A : Optional[int] = Encoder( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , ) __A : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels __A : Union[str, Any] = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1) __A : List[Any] = VectorQuantizer(_UpperCAmelCase , _UpperCAmelCase , beta=0.25 , remap=_UpperCAmelCase , sane_index_shape=_UpperCAmelCase) __A : Dict = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1) # pass init params to Decoder __A : Any = Decoder( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , norm_type=_UpperCAmelCase , ) @apply_forward_hook def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True): '''simple docstring''' __A : Optional[int] = self.encoder(_UpperCAmelCase) __A : str = self.quant_conv(_UpperCAmelCase) if not return_dict: return (h,) return VQEncoderOutput(latents=_UpperCAmelCase) @apply_forward_hook def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True): '''simple docstring''' if not force_not_quantize: __A ,__A ,__A : Dict = self.quantize(_UpperCAmelCase) else: __A : int = h __A : List[Any] = self.post_quant_conv(_UpperCAmelCase) __A : Union[str, Any] = self.decoder(_UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True): '''simple docstring''' __A : Any = sample __A : Optional[int] = self.encode(_UpperCAmelCase).latents __A : Union[str, Any] = self.decode(_UpperCAmelCase).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCAmelCase)
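# Usage sketch (illustrative; assumes the class above corresponds to
# diffusers' VQModel). A round trip encodes, vector-quantizes and decodes
# back to the input resolution:
#
#     import torch
#
#     model = VQModel()  # defaults: one down/up block, 3 latent channels
#     sample = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         reconstruction = model(sample).sample
#     print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])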
8
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __SCREAMING_SNAKE_CASE = tuple[int, int] class lowerCAmelCase__ : """simple docstring""" def __init__( self : str , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int , A__ : Node | None , ) -> None: '''simple docstring''' a__ : Optional[int] = pos_x a__ : str = pos_y a__ : Optional[int] = (pos_y, pos_x) a__ : List[str] = goal_x a__ : Any = goal_y a__ : Any = g_cost a__ : Optional[int] = parent a__ : Union[str, Any] = self.calculate_heuristic() a__ : List[Any] = self.g_cost + self.h_cost def __lowerCAmelCase ( self : Union[str, Any] ) -> float: '''simple docstring''' a__ : List[str] = self.pos_x - self.goal_x a__ : List[str] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(A__ ) + abs(A__ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : List[Any] , A__ : Node ) -> bool: '''simple docstring''' return self.f_cost < other.f_cost class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[int] , A__ : TPosition , A__ : TPosition ) -> Optional[Any]: '''simple docstring''' a__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A__ ) a__ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , A__ ) a__ : Dict = [self.start] a__ : list[Node] = [] a__ : str = False def __lowerCAmelCase ( self : List[str] ) -> list[TPosition]: '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() a__ : Dict = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(A__ ) self.closed_nodes.append(A__ ) a__ : List[Any] = self.get_successors(A__ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(A__ ) else: # retrieve the best current path a__ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(A__ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(A__ ) else: self.open_nodes.append(A__ ) return [self.start.pos] def __lowerCAmelCase ( self : Optional[Any] , A__ : Node ) -> list[Node]: '''simple docstring''' a__ : Optional[int] = [] for action in delta: a__ : List[Any] = parent.pos_x + action[1] a__ : Tuple = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A__ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( A__ , A__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A__ , ) ) return successors def __lowerCAmelCase ( self : List[Any] , A__ : Node | None ) -> list[TPosition]: '''simple docstring''' a__ : Union[str, Any] = node a__ : Optional[Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) a__ : Any = current_node.parent path.reverse() return path class lowerCAmelCase__ : """simple docstring""" def __init__( self : List[Any] , A__ : TPosition , A__ : TPosition ) -> None: '''simple docstring''' a__ : str = AStar(A__ , A__ ) a__ : Optional[int] = AStar(A__ , A__ ) a__ : List[str] = False def __lowerCAmelCase ( self : Tuple ) -> list[TPosition]: '''simple docstring''' while 
self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() a__ : int = self.fwd_astar.open_nodes.pop(0 ) a__ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( A__ , A__ ) self.fwd_astar.closed_nodes.append(A__ ) self.bwd_astar.closed_nodes.append(A__ ) a__ : Tuple = current_bwd_node a__ : Optional[int] = current_fwd_node a__ : Optional[int] = { self.fwd_astar: self.fwd_astar.get_successors(A__ ), self.bwd_astar: self.bwd_astar.get_successors(A__ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(A__ ) else: # retrieve the best current path a__ : Optional[Any] = astar.open_nodes.pop( astar.open_nodes.index(A__ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(A__ ) else: astar.open_nodes.append(A__ ) return [self.fwd_astar.start.pos] def __lowerCAmelCase ( self : List[str] , A__ : Node , A__ : Node ) -> list[TPosition]: '''simple docstring''' a__ : str = self.fwd_astar.retrace_path(A__ ) a__ : List[str] = self.bwd_astar.retrace_path(A__ ) bwd_path.pop() bwd_path.reverse() a__ : Optional[int] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __SCREAMING_SNAKE_CASE = (0, 0) __SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __SCREAMING_SNAKE_CASE = time.time() __SCREAMING_SNAKE_CASE = AStar(init, goal) __SCREAMING_SNAKE_CASE = a_star.search() __SCREAMING_SNAKE_CASE = time.time() - start_time print(f'AStar execution time = {end_time:f} seconds') __SCREAMING_SNAKE_CASE = time.time() __SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal) __SCREAMING_SNAKE_CASE = time.time() - bd_start_time print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
688
0
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _A = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if args.student_type == "roberta": lowercase_ : List[str] = False elif args.student_type == "gpt2": lowercase_ : Any = False def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if args.student_type == "roberta": lowercase_ : Union[str, Any] = False def _UpperCamelCase ( ): lowercase_ : List[Any] = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=SCREAMING_SNAKE_CASE_ , choices=['distilbert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE_ , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE_ , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The teacher model.' 
) parser.add_argument('--temperature' , default=2.0 , type=SCREAMING_SNAKE_CASE_ , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=SCREAMING_SNAKE_CASE_ , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=SCREAMING_SNAKE_CASE_ , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=SCREAMING_SNAKE_CASE_ , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=SCREAMING_SNAKE_CASE_ , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=SCREAMING_SNAKE_CASE_ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=SCREAMING_SNAKE_CASE_ , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=SCREAMING_SNAKE_CASE_ , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=SCREAMING_SNAKE_CASE_ , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5e-4 , type=SCREAMING_SNAKE_CASE_ , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1e-6 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=SCREAMING_SNAKE_CASE_ , help='Max gradient norm.' 
) parser.add_argument('--initializer_range' , default=0.02 , type=SCREAMING_SNAKE_CASE_ , help='Random initialization range.' ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=SCREAMING_SNAKE_CASE_ , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE_ , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=SCREAMING_SNAKE_CASE_ , default=500 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=SCREAMING_SNAKE_CASE_ , default=4_000 , help='Checkpoint interval.' ) lowercase_ : Optional[Any] = parser.parse_args() sanity_checks(SCREAMING_SNAKE_CASE_ ) # ARGS # init_gpu_params(SCREAMING_SNAKE_CASE_ ) set_seed(SCREAMING_SNAKE_CASE_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite''' ' it. Use `--force` if you want to overwrite it.' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , indent=4 ) git_log(args.dump_path ) lowercase_ ,lowercase_ ,lowercase_ : int = MODEL_CLASSES[args.student_type] lowercase_ ,lowercase_ ,lowercase_ : Dict = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowercase_ : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowercase_ : int = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowercase_ : Dict = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE_ ) lowercase_ : Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) lowercase_ : Any = special_tok_ids lowercase_ : List[str] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: lowercase_ : Optional[int] = pickle.load(SCREAMING_SNAKE_CASE_ ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: lowercase_ : str = pickle.load(SCREAMING_SNAKE_CASE_ ) lowercase_ : Any = np.maximum(SCREAMING_SNAKE_CASE_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowercase_ : Optional[Any] = 0.0 # do not predict special tokens lowercase_ : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) else: lowercase_ : Optional[int] = None lowercase_ : Tuple = LmSeqsDataset(params=SCREAMING_SNAKE_CASE_ , data=SCREAMING_SNAKE_CASE_ ) logger.info('Data loader created.' 
) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) lowercase_ : Optional[int] = student_config_class.from_pretrained(args.student_config ) lowercase_ : List[Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) lowercase_ : Tuple = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE_ ) else: lowercase_ : Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE_ ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' ) # TEACHER # lowercase_ : Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE_ ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowercase_ : Optional[Any] = Distiller( params=SCREAMING_SNAKE_CASE_ , dataset=SCREAMING_SNAKE_CASE_ , token_probs=SCREAMING_SNAKE_CASE_ , student=SCREAMING_SNAKE_CASE_ , teacher=SCREAMING_SNAKE_CASE_ ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
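A minimal invocation sketch for the training entry point above, assuming it is saved as train.py and that the binarized data, token counts, and student config already exist on disk; every path below is a placeholder. The flag combination respects sanity_checks(): with --mlm set, alpha_mlm must be > 0 and alpha_clm must be 0, and a distilbert student pairs with a bert teacher.

# Hypothetical command line for the script above; all paths are placeholders.
import subprocess

subprocess.run(
    [
        "python", "train.py",
        "--student_type", "distilbert",
        "--student_config", "training_configs/distilbert-base-uncased.json",  # placeholder
        "--teacher_type", "bert",
        "--teacher_name", "bert-base-uncased",
        "--mlm",
        "--alpha_ce", "5.0", "--alpha_mlm", "2.0", "--alpha_clm", "0.0",
        "--dump_path", "serialization_dir/my_first_distillation",             # placeholder
        "--data_file", "data/binarized_text.bert-base-uncased.pickle",        # placeholder
        "--token_counts", "data/token_counts.bert-base-uncased.pickle",       # placeholder
        "--force",  # overwrite dump_path if it already exists
    ],
    check=True,
)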
438
'''simple docstring''' import os import string import sys _A = 1 << 8 _A = { 'tab': ord('\t'), 'newline': ord('\r'), 'esc': 2_7, 'up': 6_5 + ARROW_KEY_FLAG, 'down': 6_6 + ARROW_KEY_FLAG, 'right': 6_7 + ARROW_KEY_FLAG, 'left': 6_8 + ARROW_KEY_FLAG, 'mod_int': 9_1, 'undefined': sys.maxsize, 'interrupt': 3, 'insert': 5_0, 'delete': 5_1, 'pg_up': 5_3, 'pg_down': 5_4, } _A = KEYMAP['up'] _A = KEYMAP['left'] if sys.platform == "win32": _A = [] _A = { b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG, b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG, } for i in range(1_0): _A = ord(str(i)) def _UpperCamelCase ( ): if os.name == "nt": import msvcrt lowercase_ : Optional[Any] = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(SCREAMING_SNAKE_CASE_ ) == 0: # Read the keystroke lowercase_ : int = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowercase_ : int = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowercase_ : List[Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ ) if ord(SCREAMING_SNAKE_CASE_ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) lowercase_ : Optional[int] = chr(KEYMAP['esc'] ) except KeyError: lowercase_ : Tuple = cha[1] else: lowercase_ : Tuple = ch.decode(SCREAMING_SNAKE_CASE_ ) else: lowercase_ : Dict = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowercase_ : int = sys.stdin.fileno() lowercase_ : Union[str, Any] = termios.tcgetattr(SCREAMING_SNAKE_CASE_ ) try: tty.setraw(SCREAMING_SNAKE_CASE_ ) lowercase_ : Any = sys.stdin.read(1 ) finally: termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ ) return ch def _UpperCamelCase ( ): lowercase_ : Union[str, Any] = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]: lowercase_ : Any = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]: lowercase_ : Dict = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
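For orientation, a usage sketch of the keystroke reader above. The style transform renamed both functions to _UpperCamelCase, so the sketch assumes their original names: get_raw_chars (one raw keystroke) and get_character (a keystroke normalized against KEYMAP). It simply echoes keys until Ctrl-C.

# Assumed original names for the two obfuscated functions above.
def echo_keys() -> None:
    while True:
        char = get_character()                   # hypothetical de-obfuscated name
        if char == chr(KEYMAP["interrupt"]):     # Ctrl-C
            break
        if char == KEYMAP["undefined"]:
            print("<unrecognized key>")
        else:
            print(repr(char))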
438
1
def A ( _lowercase = "The quick brown fox jumps over the lazy dog" , ): SCREAMING_SNAKE_CASE : Any = set() # Replace all the whitespace in our sentence SCREAMING_SNAKE_CASE : str = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(UpperCamelCase__ ) == 26 def A ( _lowercase = "The quick brown fox jumps over the lazy dog" , ): SCREAMING_SNAKE_CASE : int = [False] * 26 for char in input_str: if char.islower(): SCREAMING_SNAKE_CASE : int = True elif char.isupper(): SCREAMING_SNAKE_CASE : int = True return all(UpperCamelCase__ ) def A ( _lowercase = "The quick brown fox jumps over the lazy dog" , ): return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def A ( ): from timeit import timeit SCREAMING_SNAKE_CASE : List[str] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=UpperCamelCase__ ) ) print(timeit('''is_pangram_faster()''' , setup=UpperCamelCase__ ) ) print(timeit('''is_pangram_fastest()''' , setup=UpperCamelCase__ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
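The style transform has renamed all three pangram checks (and the benchmark) to A, so only the last definition survives in this flattened form; in their original form, is_pangram, is_pangram_faster, and is_pangram_fastest agree on every input. A sketch of the expected behavior under those assumed names:

# Assuming the original function names; all three variants should agree.
for check in (is_pangram, is_pangram_faster, is_pangram_fastest):
    assert check("The quick brown fox jumps over the lazy dog") is True
    assert check("My name is Unknown") is False            # many letters missing
    assert check("The quick brown fox jumps over the la_y dog") is False  # no 'z'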
248
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Tuple = {'vocab_file': 'spiece.model'} _lowerCAmelCase :Optional[int] = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } _lowerCAmelCase :Optional[Any] = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) _lowerCAmelCase :Optional[Any] = 0 _lowerCAmelCase :Any = 1 _lowerCAmelCase :int = 2 _lowerCAmelCase :List[str] = 3 _lowerCAmelCase :List[Any] = 4 class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ ='''left''' def __init__( self , A , A=False , A=True , A=False , A="<s>" , A="</s>" , A="<unk>" , A="<sep>" , A="<pad>" , A="<cls>" , A="<mask>" , A=["<eop>", "<eod>"] , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Optional[int] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : List[str] = 3 _UpperCAmelCase : Tuple = do_lower_case _UpperCAmelCase : Optional[int] = remove_space _UpperCAmelCase : Union[str, Any] = keep_accents _UpperCAmelCase : Union[str, Any] = vocab_file _UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def __lowerCAmelCase ( self ) -> Optional[int]: return len(self.sp_model ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: _UpperCAmelCase : List[Any] = self.__dict__.copy() _UpperCAmelCase : Union[str, Any] = None return state def __setstate__( self , A ) -> str: _UpperCAmelCase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , A ) -> Union[str, Any]: if self.remove_space: _UpperCAmelCase : List[Any] = ''' '''.join(inputs.strip().split() ) else: _UpperCAmelCase : Union[str, Any] = inputs _UpperCAmelCase : Optional[int] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: _UpperCAmelCase : Any = unicodedata.normalize('''NFKD''' , A ) _UpperCAmelCase : int = ''''''.join([c for c in outputs if not unicodedata.combining(A )] ) if self.do_lower_case: _UpperCAmelCase : str = outputs.lower() return outputs def __lowerCAmelCase ( self , A ) -> List[str]: _UpperCAmelCase : Dict = self.preprocess_text(A ) _UpperCAmelCase : Dict = self.sp_model.encode(A , out_type=A ) _UpperCAmelCase : Any = [] for piece in pieces: if len(A 
) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): _UpperCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _UpperCAmelCase : Dict = cur_pieces[1:] else: _UpperCAmelCase : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(A ) else: new_pieces.append(A ) return new_pieces def __lowerCAmelCase ( self , A ) -> str: return self.sp_model.PieceToId(A ) def __lowerCAmelCase ( self , A ) -> Any: return self.sp_model.IdToPiece(A ) def __lowerCAmelCase ( self , A ) -> List[str]: _UpperCAmelCase : Optional[int] = ''''''.join(A ).replace(A , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self , A , A = False , A = None , A = True , **A , ) -> str: _UpperCAmelCase : List[Any] = kwargs.pop('''use_source_tokenizer''' , A ) _UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(A , skip_special_tokens=A ) # To avoid mixing byte-level and unicode for byte-level BPE # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _UpperCAmelCase : Dict = [] _UpperCAmelCase : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) _UpperCAmelCase : Optional[Any] = [] sub_texts.append(A ) else: current_sub_text.append(A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _UpperCAmelCase : Dict = ''''''.join(A ) _UpperCAmelCase : Optional[Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCAmelCase : List[Any] = self.clean_up_tokenization(A ) return clean_text else: return text def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Tuple = [self.sep_token_id] _UpperCAmelCase : int = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is not None: return ([0] * len(A )) + [1] + ([0] * len(A )) + [1, 1] return ([0] * len(A )) + [1, 1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Any = [self.sep_token_id] _UpperCAmelCase : Dict = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : List[str] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(A ) return 
(out_vocab_file,)
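Since the class above mirrors transformers' XLNetTokenizer (its identifiers are obfuscated by the style transform), the same behavior can be exercised through the released class; note the left padding side and the trailing <sep> <cls> pair built by build_inputs_with_special_tokens. A brief sketch:

# Uses the released XLNetTokenizer that this obfuscated class corresponds to.
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoded = tokenizer("Hello, world!")
tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
print(tokens)                                # SentencePiece pieces, then '<sep>', '<cls>'
assert tokens[-2:] == ["<sep>", "<cls>"]
assert tokenizer.padding_side == "left"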
506
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCAmelCase = """\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ UpperCAmelCase = """\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ UpperCAmelCase = """ Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def lowercase ( a__ : List[Any] , a__ : Tuple ) -> str: return float((preds == labels).mean() ) def lowercase ( a__ : Optional[Any] , a__ : int ) -> Tuple: _UpperCamelCase = simple_accuracy(a__ , a__ ) _UpperCamelCase = float(fa_score(y_true=a__ , y_pred=a__ ) ) return { "accuracy": acc, "f1": fa, } def lowercase ( a__ : Optional[Any] , a__ : Optional[int] ) -> Any: _UpperCamelCase = np.array(a__ ) _UpperCamelCase = np.array(a__ ) _UpperCamelCase = en_sentvecs.shape[0] # mean centering _UpperCamelCase = en_sentvecs - np.mean(a__ , axis=0 ) _UpperCamelCase = in_sentvecs - np.mean(a__ , axis=0 ) _UpperCamelCase = cdist(a__ , a__ , '''cosine''' ) _UpperCamelCase = np.array(range(a__ ) ) _UpperCamelCase = sim.argsort(axis=1 )[:, :10] _UpperCamelCase = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): def _UpperCamelCase ( self : Dict ) -> Dict: if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", 
"iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def _UpperCamelCase ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ) -> List[str]: if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__UpperCamelCase , __UpperCamelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__UpperCamelCase , __UpperCamelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
342
"""simple docstring""" from functools import lru_cache @lru_cache def lowercase ( a__ : int ) -> int: if num < 0: raise ValueError('''Number should not be negative.''' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
342
1
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowercase: def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" a__ = parent a__ = batch_size a__ = image_size a__ = patch_size a__ = num_channels a__ = is_training a__ = use_labels a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = intermediate_size a__ = hidden_act a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = type_sequence_label_size a__ = initializer_range a__ = scope a__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ = (image_size // patch_size) ** 2 a__ = num_patches + 1 def lowercase__ ( self ) -> Optional[int]: """simple docstring""" a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ = None if self.use_labels: a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ = self.get_config() return config, pixel_values, labels def lowercase__ ( self ) -> List[str]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" a__ = ViTModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" a__ = ViTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( 
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images a__ = 1 a__ = ViTForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" a__ = self.type_sequence_label_size a__ = ViTForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ = 1 a__ = ViTForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" a__ = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ) = config_and_inputs a__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase(_lowercase , _lowercase , unittest.TestCase ): __snake_case: int = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) __snake_case: Any = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) __snake_case: Optional[Any] = True __snake_case: int = False __snake_case: Dict = False __snake_case: str = False def lowercase__ ( self ) -> int: """simple docstring""" a__ = ViTModelTester(self ) a__ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 ) def lowercase__ ( self ) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" pass def lowercase__ ( self ) -> Dict: """simple docstring""" a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(__SCREAMING_SNAKE_CASE ) a__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ = [*signature.parameters.keys()] a__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" a__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def lowercase__ ( self ) -> Tuple: """simple docstring""" a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ = ViTModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def __magic_name__ ( ) -> Dict: a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase(unittest.TestCase ): @cached_property def lowercase__ ( self ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def lowercase__ ( self ) -> Any: """simple docstring""" a__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__SCREAMING_SNAKE_CASE ) a__ = self.default_image_processor a__ = prepare_img() a__ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): a__ = model(**__SCREAMING_SNAKE_CASE ) # verify the logits a__ = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) a__ = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def lowercase__ ( self ) -> Any: """simple docstring""" a__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__SCREAMING_SNAKE_CASE ) a__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0 ) a__ = prepare_img() a__ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) a__ = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): a__ = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE ) # verify the logits a__ = torch.Size((1, 3_6_0_1, 3_8_4) ) self.assertEqual(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE ) a__ = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowercase__ ( self ) -> Tuple: """simple docstring""" a__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' ) a__ = self.default_image_processor a__ = prepare_img() a__ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) a__ = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE ) # forward pass to make sure inference works in fp16 with torch.no_grad(): a__ = model(__SCREAMING_SNAKE_CASE )
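The sequence-length bookkeeping in the tester is the non-obvious part: an image of side image_size cut into patch_size patches yields (image_size // patch_size) ** 2 tokens plus one [CLS]. A quick numeric check with the tester's defaults (image_size=30, patch_size=2):

# Sequence length used by ViTModelTester above: patches + [CLS] token.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2     # 15 * 15 = 225
seq_length = num_patches + 1                      # +1 for the [CLS] token
assert (num_patches, seq_length) == (225, 226)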
273
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase(_lowercase , _lowercase , unittest.TestCase ): __snake_case: Optional[int] = IFImgaImgSuperResolutionPipeline __snake_case: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'} __snake_case: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} ) __snake_case: List[Any] = PipelineTesterMixin.required_optional_params - {'latents'} def lowercase__ ( self ) -> Tuple: """simple docstring""" return self._get_superresolution_dummy_components() def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ): a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) a__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) a__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) a__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def lowercase__ ( self ) -> Any: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def lowercase__ ( self ) -> List[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowercase__ ( self ) -> List[str]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" self._test_save_load_local() def lowercase__ ( self ) -> Any: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
273
1
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : int = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class _lowerCAmelCase( _a): """simple docstring""" lowerCamelCase__ = '''t5''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , UpperCAmelCase=3_21_28 , UpperCAmelCase=5_12 , UpperCAmelCase=64 , UpperCAmelCase=20_48 , UpperCAmelCase=6 , UpperCAmelCase=None , UpperCAmelCase=8 , UpperCAmelCase=32 , UpperCAmelCase=1_28 , UpperCAmelCase=0.1 , UpperCAmelCase=1e-6 , UpperCAmelCase=1.0 , UpperCAmelCase="relu" , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=0 , UpperCAmelCase=1 , **UpperCAmelCase , )-> List[str]: __A = vocab_size __A = d_model __A = d_kv __A = d_ff __A = num_layers __A = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __A = num_heads __A = relative_attention_num_buckets __A = relative_attention_max_distance __A = dropout_rate __A = layer_norm_epsilon __A = initializer_factor __A = feed_forward_proj __A = use_cache __A = self.feed_forward_proj.split('''-''' ) __A = act_info[-1] __A = act_info[0] == '''gated''' if len(UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase ) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __A = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase , ) class _lowerCAmelCase( _a): """simple docstring""" @property def SCREAMING_SNAKE_CASE__ ( self )-> Mapping[str, Mapping[int, str]]: __A = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __A = '''past_encoder_sequence + sequence''' __A = {0: '''batch'''} __A = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __A = {0: '''batch''', 1: '''decoder_sequence'''} __A = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase , direction='''inputs''' ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self )-> int: return 13
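The feed_forward_proj parsing above is the subtle piece: the string is split on '-', the last element becomes the dense activation, a leading 'gated' enables gating, and 'gated-gelu' is additionally remapped to 'gelu_new' for backward compatibility. A check against the released T5Config:

# Behavior of the feed_forward_proj parsing, using transformers' T5Config.
from transformers import T5Config

gated = T5Config(feed_forward_proj="gated-gelu")
assert gated.is_gated_act is True
assert gated.dense_act_fn == "gelu_new"   # remapped for backward compatibility

plain = T5Config(feed_forward_proj="relu")
assert plain.is_gated_act is False and plain.dense_act_fn == "relu"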
341
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class _lowerCAmelCase: """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=14 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , )-> Dict: __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_token_type_ids __A = use_input_mask __A = use_labels __A = use_mc_token_ids __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope __A = self.vocab_size - 1 def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]: __A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __A = None if self.use_mc_token_ids: __A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __A = ids_tensor([self.batch_size] , self.num_choices ) __A = self.get_config() __A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE__ ( self )-> List[str]: return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Tuple: __A = CTRLModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() model(UpperCAmelCase , token_type_ids=UpperCAmelCase , head_mask=UpperCAmelCase ) model(UpperCAmelCase , token_type_ids=UpperCAmelCase ) __A = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> 
Optional[Any]: __A = CTRLLMHeadModel(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]: __A = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ) = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Optional[int]: __A = self.num_labels __A = CTRLForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class _lowerCAmelCase( _a , _a , _a , unittest.TestCase): """simple docstring""" lowerCamelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () lowerCamelCase__ = (CTRLLMHeadModel,) if is_torch_available() else () lowerCamelCase__ = ( { '''feature-extraction''': CTRLModel, '''text-classification''': CTRLForSequenceClassification, '''text-generation''': CTRLLMHeadModel, '''zero-shot''': CTRLForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> Dict: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def SCREAMING_SNAKE_CASE__ ( self )-> str: __A = CTRLModelTester(self ) __A = ConfigTester(self , config_class=UpperCAmelCase , n_embd=37 ) def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self )-> List[str]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self )-> List[Any]: __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self )-> Dict: __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]: pass @slow def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]: for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = CTRLModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]: pass @require_torch class _lowerCAmelCase( unittest.TestCase): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self )-> Dict: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def SCREAMING_SNAKE_CASE__ ( self )-> List[Any]: __A = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(UpperCAmelCase ) __A = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCAmelCase ) # Legal the president is __A = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __A = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase ) self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase )
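The slow generation test at the end encodes CTRL's actual usage pattern: the prompt must start with one of the model's control codes (here 'Legal'), which steers the continuation. A hedged sketch of the same interaction with the released checkpoint:

# Greedy generation with the released CTRL checkpoint, mirroring the slow test.
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")

inputs = tokenizer("Legal The president is", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=15)
print(tokenizer.decode(output_ids[0]))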
341
1
'''simple docstring''' import numpy as np import qiskit def a_ ( _UpperCAmelCase : int = 8 ,_UpperCAmelCase : int | None = None ) -> str: __snake_case : Tuple = np.random.default_rng(seed=_UpperCAmelCase ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __snake_case : int = 6 * key_len # Measurement basis for Alice's qubits. __snake_case : str = rng.integers(2 ,size=_UpperCAmelCase ) # The set of states Alice will prepare. __snake_case : Optional[int] = rng.integers(2 ,size=_UpperCAmelCase ) # Measurement basis for Bob's qubits. __snake_case : List[Any] = rng.integers(2 ,size=_UpperCAmelCase ) # Quantum Circuit to simulate BB84 __snake_case : Optional[int] = qiskit.QuantumCircuit(_UpperCAmelCase ,name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_UpperCAmelCase ): if alice_state[index] == 1: bbaa_circ.x(_UpperCAmelCase ) if alice_basis[index] == 1: bbaa_circ.h(_UpperCAmelCase ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_UpperCAmelCase ): if bob_basis[index] == 1: bbaa_circ.h(_UpperCAmelCase ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __snake_case : Optional[Any] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __snake_case : Optional[int] = qiskit.execute(_UpperCAmelCase ,_UpperCAmelCase ,shots=1 ,seed_simulator=_UpperCAmelCase ) # Returns the result of measurement. __snake_case : Any = job.result().get_counts(_UpperCAmelCase ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __snake_case : str = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. __snake_case : List[Any] = gen_key[:key_len] if len(_UpperCAmelCase ) >= key_len else gen_key.ljust(_UpperCAmelCase ,'0' ) return key if __name__ == "__main__": print(F"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
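Why num_qubits = 6 * key_len: Alice and Bob pick the same basis with probability 1/2, so only about half the positions survive sifting, and the 6x oversampling leaves ample slack before the final truncate-or-pad. A quick numeric check of the sifting rate:

# Expected sifted-key length for the oversampling factor used above.
import numpy as np

rng = np.random.default_rng(0)
key_len = 8
num_qubits = 6 * key_len
alice_basis = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)
sifted = int((alice_basis == bob_basis).sum())
print(sifted)   # ~24 on average, comfortably above key_len = 8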
286
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class snake_case__ : def __init__( self : List[Any] , __a : str , __a : Dict , __a : List[Any] , __a : str , __a : str , __a : List[str]=0.2 , __a : Any=0.2 ) -> Any: '''simple docstring''' __snake_case : Any = bp_numa __snake_case : str = bp_numa __snake_case : Optional[Any] = bp_numa __snake_case : Any = conva_get[:2] __snake_case : Dict = conva_get[2] __snake_case : Optional[int] = size_pa __snake_case : str = rate_w __snake_case : Optional[Any] = rate_t __snake_case : Optional[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] __snake_case : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __snake_case : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __snake_case : Optional[int] = -2 * np.random.rand(self.conva[1] ) + 1 __snake_case : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1 __snake_case : str = -2 * np.random.rand(self.num_bpa ) + 1 def A_ ( self : int , __a : Dict ) -> Optional[Any]: '''simple docstring''' # save model dict with pickle __snake_case : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__a , 'wb' ) as f: pickle.dump(__a , __a ) print(f'''Model saved: {save_path}''' ) @classmethod def A_ ( cls : List[Any] , __a : Union[str, Any] ) -> int: '''simple docstring''' # read saved model with open(__a , 'rb' ) as f: __snake_case : Dict = pickle.load(__a ) # noqa: S301 __snake_case : Tuple = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) __snake_case : Optional[Any] = model_dic.get('size_pooling1' ) __snake_case : int = model_dic.get('num_bp1' ) __snake_case : Optional[Any] = model_dic.get('num_bp2' ) __snake_case : Optional[Any] = model_dic.get('num_bp3' ) __snake_case : Any = model_dic.get('rate_weight' ) __snake_case : Optional[int] = model_dic.get('rate_thre' ) # create model instance __snake_case : Any = CNN(__a , __a , __a , __a , __a , __a , __a ) # modify model parameter __snake_case : Dict = model_dic.get('w_conv1' ) __snake_case : Any = model_dic.get('wkj' ) __snake_case : List[str] = model_dic.get('vji' ) __snake_case : int = model_dic.get('thre_conv1' ) __snake_case : Optional[int] = model_dic.get('thre_bp2' ) __snake_case : List[Any] = model_dic.get('thre_bp3' ) return conv_ins def A_ ( self : List[Any] , __a : Tuple ) -> Optional[int]: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def A_ ( self : Any , __a : Union[str, Any] ) -> List[Any]: '''simple docstring''' return round(__a , 3 ) def A_ ( self : Optional[Any] , __a : Tuple , __a : List[str] , __a : Dict , __a : Optional[int] , __a : List[str] ) -> str: '''simple docstring''' # convolution process __snake_case : int = convs[0] __snake_case : List[str] = convs[1] __snake_case : Optional[Any] = np.shape(__a )[0] # get the data slice of original image data, data_focus __snake_case : str = [] for i_focus in range(0 , size_data - size_conv + 1 , __a ): for j_focus in range(0 , size_data - size_conv + 1 , __a ): __snake_case : Any = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__a ) # calculate 
the feature map of every single kernel, saved as a list of matrices __snake_case : Optional[int] = [] __snake_case : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__a ): __snake_case : Optional[int] = [] for i_focus in range(len(__a ) ): __snake_case : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__a ) ) __snake_case : List[Any] = np.asmatrix(__a ).reshape( __a , __a ) data_featuremap.append(__a ) # expand the data slice to one dimension __snake_case : Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__a ) ) __snake_case : List[Any] = np.asarray(__a ) return focus_list, data_featuremap def A_ ( self : Any , __a : int , __a : Tuple , __a : List[Any]="average_pool" ) -> Dict: '''simple docstring''' # pooling process __snake_case : List[str] = len(featuremaps[0] ) __snake_case : Tuple = int(size_map / size_pooling ) __snake_case : int = [] for i_map in range(len(__a ) ): __snake_case : str = featuremaps[i_map] __snake_case : Optional[Any] = [] for i_focus in range(0 , __a , __a ): for j_focus in range(0 , __a , __a ): __snake_case : Dict = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__a ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__a ) ) __snake_case : List[str] = np.asmatrix(__a ).reshape(__a , __a ) featuremap_pooled.append(__a ) return featuremap_pooled def A_ ( self : List[str] , __a : Union[str, Any] ) -> int: '''simple docstring''' # expand three-dimensional data into a one-dimensional list __snake_case : Tuple = [] for i in range(len(__a ) ): __snake_case : Optional[int] = np.shape(data[i] ) __snake_case : List[str] = data[i].reshape(1 , shapes[0] * shapes[1] ) __snake_case : List[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__a ) __snake_case : Optional[int] = np.asarray(__a ) return data_expanded def A_ ( self : Union[str, Any] , __a : int ) -> Any: '''simple docstring''' # expand a matrix into a one-dimensional list __snake_case : int = np.asarray(__a ) __snake_case : str = np.shape(__a ) __snake_case : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def A_ ( self : List[Any] , __a : str , __a : Optional[int] , __a : List[str] , __a : int , __a : List[str] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = [] __snake_case : Tuple = 0 for i_map in range(__a ): __snake_case : Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , __a , __a ): for j in range(0 , __a , __a ): __snake_case : Any = pd_pool[ i_pool ] __snake_case : List[Any] = i_pool + 1 __snake_case : List[Any] = np.multiply( __a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(__a ) return pd_all def A_ ( self : Tuple , __a : List[str] , __a : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : Tuple , __a : Optional[int]=bool ) -> List[Any]: '''simple docstring''' # model training print('----------------------Start Training-------------------------' ) print((' - - Shape: Train_Data ', np.shape(__a )) ) print((' - - Shape: Teach_Data ', np.shape(__a )) ) __snake_case : str = 0 __snake_case : List[str] = [] __snake_case : List[Any] = 10000 while rp < n_repeat and mse >= error_accuracy: __snake_case : int = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__a ) ): # print('------------Learning Image: 
%d--------------'%p) __snake_case : List[Any] = np.asmatrix(datas_train[p] ) __snake_case : Optional[Any] = np.asarray(datas_teach[p] ) __snake_case , __snake_case : List[Any] = self.convolute( __a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __snake_case : Tuple = self.pooling(__a , self.size_poolinga ) __snake_case : Dict = np.shape(__a ) __snake_case : Tuple = self._expand(__a ) __snake_case : str = data_bp_input __snake_case : List[Any] = np.dot(__a , self.vji.T ) - self.thre_bpa __snake_case : Any = self.sig(__a ) __snake_case : Tuple = np.dot(__a , self.wkj.T ) - self.thre_bpa __snake_case : Optional[Any] = self.sig(__a ) # --------------Model Learning ------------------------ # calculate error and gradient--------------- __snake_case : Tuple = np.multiply( (data_teach - bp_outa) , np.multiply(__a , (1 - bp_outa) ) ) __snake_case : Tuple = np.multiply( np.dot(__a , self.wkj ) , np.multiply(__a , (1 - bp_outa) ) ) __snake_case : Union[str, Any] = np.dot(__a , self.vji ) __snake_case : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga) __snake_case : Tuple = pd_conva_pooled.T.getA().tolist() __snake_case : Optional[Any] = self._calculate_gradient_from_pool( __a , __a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): __snake_case : int = self._expand_mat(pd_conva_all[k_conv] ) __snake_case : Optional[int] = self.rate_weight * np.dot(__a , __a ) __snake_case : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) __snake_case : Optional[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer __snake_case : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight __snake_case : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight __snake_case : str = self.thre_bpa - pd_k_all * self.rate_thre __snake_case : Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image __snake_case : Any = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) __snake_case : Tuple = rp + 1 __snake_case : Tuple = error_count / patterns all_mse.append(__a ) def draw_error(): __snake_case : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__a , '+-' ) plt.plot(__a , 'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__a , alpha=0.5 ) plt.show() print('------------------Training Complete---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A_ ( self : Tuple , __a : Union[str, Any] ) -> List[Any]: '''simple docstring''' # model predict __snake_case : str = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__a )) ) for p in range(len(__a ) ): __snake_case : int = np.asmatrix(datas_test[p] ) __snake_case , __snake_case : str = self.convolute( __a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __snake_case : List[str] = self.pooling(__a , self.size_poolinga ) __snake_case : List[Any] = self._expand(__a ) __snake_case : Optional[Any] = data_bp_input __snake_case : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa __snake_case : Any = self.sig(__a ) __snake_case : Any = bp_outa * self.wkj.T - self.thre_bpa 
__snake_case : str = self.sig(__a ) produce_out.extend(bp_outa.getA().tolist() ) __snake_case : List[Any] = [list(map(self.do_round , __a ) ) for each in produce_out] return np.asarray(__a ) def A_ ( self : Optional[Any] , __a : Optional[int] ) -> Tuple: '''simple docstring''' # return the data of image after convoluting process so we can check it out __snake_case : int = np.asmatrix(__a ) __snake_case , __snake_case : int = self.convolute( __a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __snake_case : Dict = self.pooling(__a , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
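# Illustrative sketch (not part of the class above): the convolute loop evaluates
# one Python-level np.sum per output pixel. The same valid-convolution + sigmoid
# step can be written with NumPy patch slicing; every name and value here is an
# assumption for the demo, assuming square inputs and kernels.
import numpy as np


def conv_feature_map(data: np.ndarray, kernel: np.ndarray, thre: float, step: int = 1) -> np.ndarray:
    """Return sigmoid(valid_conv(data, kernel) - thre) as a 2-D array."""
    size_data, size_conv = data.shape[0], kernel.shape[0]
    n_out = (size_data - size_conv) // step + 1
    out = np.empty((n_out, n_out))
    for i in range(n_out):
        for j in range(n_out):
            # one kernel-sized patch per output pixel, summed in C instead of Python
            patch = data[i * step : i * step + size_conv, j * step : j * step + size_conv]
            out[i, j] = np.sum(patch * kernel) - thre
    return 1.0 / (1.0 + np.exp(-out))  # sigmoid, matching self.sig above


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    fmap = conv_feature_map(rng.random((6, 6)), rng.random((3, 3)), thre=0.5)
    print(fmap.shape)  # (4, 4)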
286
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING a__ = logging.get_logger(__name__) a__ = { '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''', } class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : int = "blip_2_vision_model" def __init__( self , _a=1_4_0_8 , _a=6_1_4_4 , _a=3_9 , _a=1_6 , _a=2_2_4 , _a=1_4 , _a="gelu" , _a=0.0_0001 , _a=0.0 , _a=1e-1_0 , _a=True , **_a , ) -> str: super().__init__(**_a ) _a : Any = hidden_size _a : Optional[Any] = intermediate_size _a : str = num_hidden_layers _a : Union[str, Any] = num_attention_heads _a : Any = patch_size _a : str = image_size _a : Tuple = initializer_range _a : List[Any] = attention_dropout _a : Any = layer_norm_eps _a : List[Any] = hidden_act _a : Optional[int] = qkv_bias @classmethod def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _a , _a : List[str] = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": _a : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = "blip_2_qformer" def __init__( self , _a=3_0_5_2_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=0.02 , _a=1e-1_2 , _a=0 , _a="absolute" , _a=2 , _a=1_4_0_8 , **_a , ) -> Union[str, Any]: super().__init__(pad_token_id=_a , **_a ) _a : List[str] = vocab_size _a : List[str] = hidden_size _a : List[str] = num_hidden_layers _a : int = num_attention_heads _a : List[Any] = hidden_act _a : str = intermediate_size _a : Dict = hidden_dropout_prob _a : int = attention_probs_dropout_prob _a : str = max_position_embeddings _a : int = initializer_range _a : Optional[Any] = layer_norm_eps _a : Optional[Any] = position_embedding_type _a : Any = cross_attention_frequency _a : int = encoder_hidden_size @classmethod def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _a , _a : Optional[int] = cls.get_config_dict(_a , **_a ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": _a : Optional[int] = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Dict = "blip-2" UpperCAmelCase__ : Dict = True def __init__( self , _a=None , _a=None , _a=None , _a=3_2 , **_a ) -> Optional[int]: super().__init__(**_a ) if vision_config is None: _a : Any = {} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' ) if qformer_config is None: _a : int = {} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' ) if text_config is None: _a : Union[str, Any] = {} logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' ) _a : List[Any] = BlipaVisionConfig(**_a ) _a : Tuple = BlipaQFormerConfig(**_a ) _a : Tuple = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' _a : Union[str, Any] = CONFIG_MAPPING[text_model_type](**_a ) _a : Dict = self.text_config.tie_word_embeddings _a : Dict = self.text_config.is_encoder_decoder _a : Any = num_query_tokens _a : Tuple = self.vision_config.hidden_size _a : Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _a : Tuple = 1.0 _a : Any = 0.02 @classmethod def __lowercase ( cls , _a , _a , _a , **_a , ) -> Tuple: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_a , ) def __lowercase ( self ) -> List[Any]: _a : Dict = copy.deepcopy(self.__dict__ ) _a : int = self.vision_config.to_dict() _a : List[str] = self.qformer_config.to_dict() _a : int = self.text_config.to_dict() _a : Dict = self.__class__.model_type return output
578
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum of the first
    n natural numbers and the sum of their squares."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # (1 + 2 + ... + n) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6  # 1**2 + 2**2 + ... + n**2
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
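# Illustrative sketch (not part of the file above): a cheap brute-force
# cross-check of the two closed forms; 2640 is the documented result for n = 10.
def solution_bruteforce(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares


assert solution_bruteforce(10) == 2640
assert solution_bruteforce(100) == solution(100)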
578
1
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
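# Illustrative sketch (not part of the file above): bisection halves the bracket
# each step, so with the 1e-7 stopping tolerance it needs roughly
# log2((b - a) / 1e-7) iterations, independent of the function. Demo on a
# different root: sqrt(2) is the positive root of x**2 - 2 on [1, 2].
def g(x: float) -> float:
    return x * x - 2


print(f"{bisection(g, 1, 2):.7f}")  # ~1.4142136, close to 2 ** 0.5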
97
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowerCamelCase : Optional[int] = '''\ @inproceedings{lin-2004-rouge, title = "{ROUGE}: A Package for Automatic Evaluation of Summaries", author = "Lin, Chin-Yew", booktitle = "Text Summarization Branches Out", month = jul, year = "2004", address = "Barcelona, Spain", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W04-1013", pages = "74--81", } ''' _lowerCamelCase : List[str] = '''\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge ''' _lowerCamelCase : Dict = ''' Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring, `"rougeL"`: Longest common subsequence based scoring. `"rougeLSum"`: rougeLsum splits text using `"\n"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric(\'rouge\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\'] >>> print(results["rouge1"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results["rouge1"].mid.fmeasure) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): '''simple docstring''' def A ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def A ( self : Union[str, Any] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int=None , lowercase : str=True , lowercase : List[str]=False ): '''simple docstring''' if rouge_types is None: _snake_case = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] _snake_case = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase ) if use_aggregator: _snake_case = scoring.BootstrapAggregator() else: _snake_case = [] for ref, pred in zip(lowercase , lowercase ): _snake_case = scorer.score(lowercase , lowercase ) if use_aggregator: aggregator.add_scores(lowercase ) else: scores.append(lowercase ) if use_aggregator: _snake_case = aggregator.aggregate() else: _snake_case = {} for key in scores[0]: _snake_case = [score[key] for score in scores] return result
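# Illustrative sketch (not part of the metric file above): the datasets wrapper
# delegates to the rouge_score package, which can also be called directly.
# The example strings below are arbitrary.
from rouge_score import rouge_scorer

demo_scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
demo_scores = demo_scorer.score("hello there", "hello there general kenobi")
print(demo_scores["rouge1"].fmeasure)  # per-pair unigram-overlap F1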
686
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a ( __magic_name__ ,unittest.TestCase ): _snake_case = ShapEImgaImgPipeline _snake_case = ['''image'''] _snake_case = ['''image'''] _snake_case = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] _snake_case = False @property def __snake_case ( self : str ): return 32 @property def __snake_case ( self : Optional[int] ): return 32 @property def __snake_case ( self : int ): return self.time_input_dim * 4 @property def __snake_case ( self : List[Any] ): return 8 @property def __snake_case ( self : Optional[Any] ): torch.manual_seed(0 ) snake_case : List[str] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) snake_case : List[Any] = CLIPVisionModel(SCREAMING_SNAKE_CASE_ ) return model @property def __snake_case ( self : str ): snake_case : Optional[int] = CLIPImageProcessor( crop_size=2_24, do_center_crop=SCREAMING_SNAKE_CASE_, do_normalize=SCREAMING_SNAKE_CASE_, do_resize=SCREAMING_SNAKE_CASE_, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=2_24, ) return image_processor @property def __snake_case ( self : Optional[int] ): torch.manual_seed(0 ) snake_case : List[Any] = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } snake_case : List[str] = PriorTransformer(**SCREAMING_SNAKE_CASE_ ) return model @property def __snake_case ( self : Dict ): torch.manual_seed(0 ) snake_case : List[str] = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } snake_case : Optional[Any] = ShapERenderer(**SCREAMING_SNAKE_CASE_ ) return model def __snake_case ( self : List[Any] ): snake_case : Any = self.dummy_prior snake_case : str = self.dummy_image_encoder snake_case : Dict = self.dummy_image_processor snake_case : Tuple = self.dummy_renderer snake_case : Optional[int] = HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=10_24, prediction_type='''sample''', use_karras_sigmas=SCREAMING_SNAKE_CASE_, clip_sample=SCREAMING_SNAKE_CASE_, clip_sample_range=1.0, ) snake_case : Dict = { '''prior''': prior, '''image_encoder''': image_encoder, 
'''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __snake_case ( self : Dict, SCREAMING_SNAKE_CASE_ : Union[str, Any], SCREAMING_SNAKE_CASE_ : Optional[Any]=0 ): snake_case : int = floats_tensor((1, 3, 64, 64), rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ): snake_case : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: snake_case : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) snake_case : int = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __snake_case ( self : List[Any] ): snake_case : List[str] = '''cpu''' snake_case : Optional[Any] = self.get_dummy_components() snake_case : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) snake_case : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) snake_case : Any = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) snake_case : Dict = output.images[0] snake_case : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) snake_case : Optional[int] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __snake_case ( self : Optional[Any] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __snake_case ( self : Optional[int] ): snake_case : Union[str, Any] = torch_device == '''cpu''' snake_case : int = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=SCREAMING_SNAKE_CASE_, relax_max_difference=SCREAMING_SNAKE_CASE_, ) def __snake_case ( self : List[Any] ): snake_case : Tuple = self.get_dummy_components() snake_case : Union[str, Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) snake_case : str = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) snake_case : List[str] = 1 snake_case : Optional[Any] = 2 snake_case : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) for key in inputs.keys(): if key in self.batch_params: snake_case : Optional[int] = batch_size * [inputs[key]] snake_case : Dict = pipe(**SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a ( unittest.TestCase ): def __snake_case ( self : Optional[int] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : int ): snake_case : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) snake_case : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) snake_case : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) snake_case : Any = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) snake_case : Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) snake_case : List[str] = pipe( SCREAMING_SNAKE_CASE_, 
generator=SCREAMING_SNAKE_CASE_, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
716
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } UpperCAmelCase = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( A_ : Dict , A_ : Any , A_ : List[str] , A_ : Tuple , A_ : Optional[Any] ): for attribute in key.split('''.''' ): snake_case : Tuple = getattr(A_ , A_ ) if weight_type is not None: snake_case : Optional[Any] = getattr(A_ , A_ ).shape else: snake_case : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case : Optional[Any] = value elif weight_type == "weight_g": snake_case : Any = value elif weight_type == "weight_v": snake_case : int = value elif weight_type == "bias": snake_case : Any = value else: snake_case : Optional[int] = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def A ( A_ : str , A_ : str ): snake_case : Dict = [] snake_case : Optional[Any] = fairseq_model.state_dict() snake_case : int = hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case : List[Any] = False if "conv_layers" in name: load_conv_layer( A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == '''group''' , ) snake_case : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: snake_case : Optional[int] = True if "*" in mapped_key: snake_case : Dict = name.split(A_ )[0].split('''.''' )[-2] snake_case : List[str] = mapped_key.replace('''*''' , A_ ) if "weight_g" in name: snake_case : Optional[Any] = '''weight_g''' elif "weight_v" in name: snake_case : Optional[Any] = '''weight_v''' elif "bias" in name and "relative_attention_bias" not in name: snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case : Optional[int] = '''weight''' else: snake_case : Union[str, Any] = None set_recursively(A_ , A_ , A_ , A_ , A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def A ( A_ : Tuple , A_ : Any , A_ : int , A_ : Dict , A_ : Optional[Any] ): snake_case : List[str] = full_name.split('''conv_layers.''' )[-1] snake_case : List[Any] = name.split('''.''' ) snake_case : Dict = int(items[0] ) snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case : Tuple = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A_ ) @torch.no_grad() def A ( A_ : Tuple , A_ : Any , A_ : Optional[Any]=None ): # load the pre-trained checkpoints snake_case : Any = torch.load(A_ ) snake_case : List[str] = WavLMConfigOrig(checkpoint['''cfg'''] ) snake_case : Union[str, Any] = WavLMOrig(A_ ) model.load_state_dict(checkpoint['''model'''] ) model.eval() if config_path is not None: snake_case : Union[str, Any] = WavLMConfig.from_pretrained(A_ ) else: snake_case : Any = WavLMConfig() snake_case : Optional[int] = WavLMModel(A_ ) recursively_load_weights(A_ , A_ ) hf_wavlm.save_pretrained(A_ ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCAmelCase = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
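# Illustrative usage (not part of the script above): the script filename and all
# paths below are placeholders; the flags match the argparse definitions above.
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-converted \
#       --config_path ./config.json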
555
0
'''simple docstring''' import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCamelCase__ : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase__ : Optional[int] = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) UpperCamelCase__ : Optional[int] = spec.loader.load_module() UpperCamelCase__ : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCamelCase__ : Union[str, Any] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") UpperCamelCase__ : Optional[Any] = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def lowerCAmelCase_ ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = [] for config_class in list(CONFIG_MAPPING.values() ): _SCREAMING_SNAKE_CASE = False # source code of `config_class` _SCREAMING_SNAKE_CASE = inspect.getsource(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = checkpoint # verify the checkpoint name corresponds to the checkpoint link _SCREAMING_SNAKE_CASE = F"https://huggingface.co/{ckpt_name}" if ckpt_link == ckpt_link_from_name: _SCREAMING_SNAKE_CASE = True break _SCREAMING_SNAKE_CASE = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: _SCREAMING_SNAKE_CASE = """\n""".join(sorted(SCREAMING_SNAKE_CASE_ ) ) raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
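# Illustrative sketch (not part of the check script above): what the checkpoint
# regex extracts from a config docstring, on a hand-written example string.
import re

demo_re = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
demo_doc = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(demo_re.findall(demo_doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]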
591
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: """simple docstring""" # initialize config if "resnet-50" in model_name: _SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: _SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) _SCREAMING_SNAKE_CASE = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE_ , backbone_config=SCREAMING_SNAKE_CASE_ ) # set label attributes _SCREAMING_SNAKE_CASE = """panoptic""" in model_name if is_panoptic: _SCREAMING_SNAKE_CASE = 2_50 else: _SCREAMING_SNAKE_CASE = 91 _SCREAMING_SNAKE_CASE = """huggingface/label-files""" _SCREAMING_SNAKE_CASE = """coco-detection-id2label.json""" _SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) ) _SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", 
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", F"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", F"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") ) rename_keys.append( 
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = """""" if is_panoptic: _SCREAMING_SNAKE_CASE = """detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) _SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :] _SCREAMING_SNAKE_CASE = in_proj_bias[:2_56] _SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :] _SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12] _SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :] _SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) _SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :] _SCREAMING_SNAKE_CASE = in_proj_bias[:2_56] _SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :] _SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12] 
_SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :] _SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:] # read in weights + bias of input projection layer of cross-attention _SCREAMING_SNAKE_CASE = state_dict.pop( F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) _SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:2_56, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:2_56] _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[2_56:5_12, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[2_56:5_12] _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-2_56:, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-2_56:] def lowerCAmelCase_ ( ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_detr_config(SCREAMING_SNAKE_CASE_ ) # load original model from torch hub _SCREAMING_SNAKE_CASE = { """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F"Converting model {model_name}..." ) _SCREAMING_SNAKE_CASE = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE_ ).eval() _SCREAMING_SNAKE_CASE = detr.state_dict() # rename keys for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE_ ): if is_panoptic: _SCREAMING_SNAKE_CASE = """detr.""" + src rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # query, key and value matrices need special treatment read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _SCREAMING_SNAKE_CASE = """detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict _SCREAMING_SNAKE_CASE = DetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() # verify our conversion on an image _SCREAMING_SNAKE_CASE = """coco_panoptic""" if is_panoptic else """coco_detection""" _SCREAMING_SNAKE_CASE = DetrImageProcessor(format=SCREAMING_SNAKE_CASE_ ) 
_SCREAMING_SNAKE_CASE = processor(images=prepare_img() , return_tensors="""pt""" ) _SCREAMING_SNAKE_CASE = encoding["""pixel_values"""] _SCREAMING_SNAKE_CASE = detr(SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F"nielsr/{model_name}" ) processor.push_to_hub(F"nielsr/{model_name}" ) if __name__ == "__main__": UpperCamelCase__ : Any = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") UpperCamelCase__ : Any = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
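# Illustrative usage (not part of the script above): the script filename and the
# output path below are placeholders; the flags match the argparse definitions above.
#   python convert_detr_checkpoint.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted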
591
1
def mf_knapsack(i, wt, val, j):
    """Memoization-function variant of knapsack, backed by a global dp table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp  # w_ holds w after the loop


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for item i at maximum weight j to be part of an optimal subset, the optimal
    # value at (i, j) must differ from the value at (i - 1, j), i.e. the value
    # obtained when considering only the previous items at that weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
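# Illustrative sketch (not part of the file above): the same top-down recursion
# can use functools.lru_cache instead of the explicit global table `f`.
# All names below are assumptions for the demo; wt/val are tuples so they hash.
from functools import lru_cache


def knapsack_memoized(w, wt, val):
    @lru_cache(maxsize=None)
    def best(i, cap):
        if i == 0 or cap == 0:
            return 0
        if wt[i - 1] > cap:
            return best(i - 1, cap)
        return max(best(i - 1, cap), best(i - 1, cap - wt[i - 1]) + val[i - 1])

    return best(len(wt), w)


assert knapsack_memoized(6, (4, 3, 2, 3), (3, 2, 4, 4)) == 8  # matches the assert above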
721
def get_demo_graph(index):
    """Return one of four small undirected demo graphs as an adjacency dict."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return all bridges of the graph: edges whose removal disconnects it."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
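# Illustrative check (not part of the file above; names as reconstructed here):
# the first demo graph is two cycles (0-1-2 and 5-6-7-8) joined through vertex 2
# plus the pendant path 2-3-4, so exactly three edges are bridges. Comparing as a
# set keeps the check independent of the DFS traversal order.
assert set(compute_bridges(get_demo_graph(0))) == {(2, 3), (3, 4), (2, 5)}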
596
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
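# Illustrative sketch (not the real transformers._LazyModule implementation):
# the core idea of the lazy __init__ above is a module subclass that only imports
# a submodule on first attribute access. All names here are assumptions.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map "public name" -> submodule that actually defines it
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._name_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is imported only here, on first attribute access
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)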
457
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class __lowercase ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> Dict: '''simple docstring''' lowerCamelCase = logging.get_logger() # the current default level is logging.WARNING lowerCamelCase = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(A ) def __A ( self ) -> Tuple: '''simple docstring''' lowerCamelCase = logging.get_verbosity() lowerCamelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) lowerCamelCase = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(A ) as cl: logger.warning(A ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(A ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var lowerCamelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) lowerCamelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , A ) lowerCamelCase = logging.log_levels[env_level_str] lowerCamelCase = logging.get_verbosity() self.assertEqual( A , A , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level lowerCamelCase = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() lowerCamelCase = logging.logging.getLogger() with CaptureLogger(A ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def __A ( self ) -> Optional[Any]: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() lowerCamelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) lowerCamelCase = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with CaptureLogger(A ) as cl: logger.warning_advice(A ) self.assertEqual(cl.out , """""" ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(A ) as cl: logger.warning_advice(A ) self.assertEqual(cl.out , msg + """\n""" ) def __lowerCamelCase ( ): '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
457
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json", # See all Dinat models at https://huggingface.co/models?filter=dinat } class a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase_ : Tuple = """dinat""" UpperCamelCase_ : int = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Optional[Any] , lowerCamelCase__ : int=4 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=64 , lowerCamelCase__ : List[str]=[3, 4, 6, 5] , lowerCamelCase__ : List[Any]=[2, 4, 8, 16] , lowerCamelCase__ : Union[str, Any]=7 , lowerCamelCase__ : Optional[Any]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ : int=3.0 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Dict=0.0 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Tuple=0.0_2 , lowerCamelCase__ : Union[str, Any]=1e-5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : List[Any] , ) -> Any: """simple docstring""" super().__init__(**lowerCamelCase__ ) __lowercase = patch_size __lowercase = num_channels __lowercase = embed_dim __lowercase = depths __lowercase = len(lowerCamelCase__ ) __lowercase = num_heads __lowercase = kernel_size __lowercase = dilations __lowercase = mlp_ratio __lowercase = qkv_bias __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = drop_path_rate __lowercase = hidden_act __lowercase = layer_norm_eps __lowercase = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) ) __lowercase = layer_scale_init_value __lowercase = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(lowerCamelCase__ ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
716
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
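# Illustrative sketch (not part of the file above; names as reconstructed here):
# a trivial pass-through "filter" satisfying the FilterType protocol. Its impulse
# response is a unit impulse, so the plots show a flat 0 dB gain and zero phase.
class AllPassDemo:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(AllPassDemo(), 48000)
    show_phase_response(AllPassDemo(), 48000)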
362
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase__ ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 / 255 , SCREAMING_SNAKE_CASE=True , ) -> Dict: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCamelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Union[str, Any] = min_resolution _lowerCamelCase : Optional[int] = max_resolution _lowerCamelCase : List[Any] = do_resize _lowerCamelCase : str = size _lowerCamelCase : Union[str, Any] = do_normalize _lowerCamelCase : Union[str, Any] = image_mean _lowerCamelCase : Tuple = image_std _lowerCamelCase : List[Any] = do_rescale _lowerCamelCase : Dict = rescale_factor _lowerCamelCase : Any = do_pad def UpperCamelCase_ ( self) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Any: if not batched: _lowerCamelCase : Tuple = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE , Image.Image): _lowerCamelCase , _lowerCamelCase : Any = image.size else: _lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2] if w < h: _lowerCamelCase : Optional[int] = int(self.size["""shortest_edge"""] * h / w) _lowerCamelCase : Optional[int] = self.size["""shortest_edge"""] elif w > h: _lowerCamelCase : Any = self.size["""shortest_edge"""] _lowerCamelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h) else: _lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""] _lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""] else: _lowerCamelCase : Any = [] for image in image_inputs: _lowerCamelCase , _lowerCamelCase : List[str] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) _lowerCamelCase : List[Any] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[0])[0] _lowerCamelCase : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[1])[1] return expected_height, expected_width @require_torch @require_vision class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = ConditionalDetrImageProcessingTester(self) @property def UpperCamelCase_ ( self) -> str: return self.image_processor_tester.prepare_image_processor_dict() def 
UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""")) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333}) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84}) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: pass def UpperCamelCase_ ( self) -> Any: # Initialize image_processing _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input _lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase_ ( self) -> int: # Initialize image_processing _lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input _lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Any = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase_ ( self) -> Optional[Any]: # 
Initialize image_processing _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input _lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase_ ( self) -> str: # prepare image and target _lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f: _lowerCamelCase : List[Any] = json.loads(f.read()) _lowerCamelCase : Union[str, Any] = {"""image_id""": 3_9769, """annotations""": target} # encode them _lowerCamelCase : Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""") _lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="""pt""") # verify pixel values _lowerCamelCase : Dict = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)) # verify area _lowerCamelCase : List[str] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE)) # verify boxes _lowerCamelCase : Union[str, Any] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3)) # verify image_id _lowerCamelCase : Union[str, Any] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE)) # verify is_crowd _lowerCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE)) # verify class_labels _lowerCamelCase : Any = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE)) # verify orig_size _lowerCamelCase : List[str] = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE)) # verify size _lowerCamelCase : Tuple = 
torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE)) @slow def UpperCamelCase_ ( self) -> Optional[Any]: # prepare image, target and masks_path _lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f: _lowerCamelCase : Optional[int] = json.loads(f.read()) _lowerCamelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} _lowerCamelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""") # encode them _lowerCamelCase : List[str] = ConditionalDetrImageProcessor(format="""coco_panoptic""") _lowerCamelCase : Dict = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="""pt""") # verify pixel values _lowerCamelCase : int = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)) # verify area _lowerCamelCase : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE)) # verify boxes _lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3)) # verify image_id _lowerCamelCase : List[str] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE)) # verify is_crowd _lowerCamelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE)) # verify class_labels _lowerCamelCase : str = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE)) # verify masks _lowerCamelCase : Any = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , SCREAMING_SNAKE_CASE) # verify orig_size _lowerCamelCase : List[Any] = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE)) # verify size _lowerCamelCase : Optional[Any] = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE))
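Outside the test harness, the same preprocessing is a few lines; this sketch reuses the checkpoint and COCO fixture that the slow test itself loads, so the expected output shape comes straight from the assertions above.

from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) per the test above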
88
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
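The script is normally driven by the three CLI flags defined above, but the converter can also be called directly; the paths below are placeholders, not real checkpoints.

# Direct call with placeholder paths, passed positionally as at the bottom of the script.
convert_tf_checkpoint_to_pytorch(
    "albert_base/model.ckpt-best",      # --tf_checkpoint_path (placeholder)
    "albert_base/albert_config.json",   # --albert_config_file (placeholder)
    "albert_base/pytorch_model.bin",    # --pytorch_dump_path (placeholder)
)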
236
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __SCREAMING_SNAKE_CASE : Union[str, Any] = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] __SCREAMING_SNAKE_CASE : int = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] __SCREAMING_SNAKE_CASE : str = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) __SCREAMING_SNAKE_CASE : Optional[Any] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def snake_case (__lowercase , __lowercase ) -> Dict: '''simple docstring''' for tf_name, hf_name in patterns: _snake_case : Union[str, Any] = k.replace(__lowercase , __lowercase ) return k def snake_case (__lowercase , __lowercase ) -> BigBirdPegasusForConditionalGeneration: '''simple docstring''' _snake_case : List[str] = BigBirdPegasusConfig(**__lowercase ) _snake_case : Union[str, Any] = BigBirdPegasusForConditionalGeneration(__lowercase ) _snake_case : int = torch_model.state_dict() _snake_case : List[str] = {} # separating decoder weights _snake_case : List[str] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} _snake_case : List[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ): _snake_case : Optional[Any] = [k.endswith(__lowercase ) for ending in KEYS_TO_IGNORE] if any(__lowercase ): continue _snake_case : int = DECODER_PATTERNS _snake_case : Optional[Any] = rename_state_dict_key(__lowercase , __lowercase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): _snake_case : int = v.T _snake_case : Tuple = torch.from_numpy(__lowercase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ): _snake_case : Tuple = [k.endswith(__lowercase ) for ending in KEYS_TO_IGNORE] if any(__lowercase ): continue _snake_case : Tuple = REMAINING_PATTERNS _snake_case : Dict = rename_state_dict_key(__lowercase , __lowercase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): _snake_case : Tuple = v.T _snake_case : Any = torch.from_numpy(__lowercase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" _snake_case : str = mapping["model.embed_positions.weight"] _snake_case : Any = mapping.pop("model.embed_positions.weight" ) _snake_case ,_snake_case : str = torch_model.load_state_dict(__lowercase , strict=__lowercase ) _snake_case : List[str] = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def snake_case (__lowercase ) -> Dict: '''simple docstring''' _snake_case : Optional[Any] = tf.train.list_variables(__lowercase ) _snake_case : str = {} _snake_case : int = ["global_step"] for name, shape in tqdm(__lowercase , desc="converting tf checkpoint to dict" ): _snake_case : List[str] = any(pat in name for pat in ignore_name ) if skip_key: continue _snake_case : Tuple = tf.train.load_variable(__lowercase , __lowercase ) _snake_case : Optional[int] = array return tf_weights def snake_case (__lowercase , __lowercase , __lowercase ) -> int: '''simple docstring''' _snake_case : Tuple = get_tf_weights_as_numpy(__lowercase ) _snake_case : Optional[Any] = convert_bigbird_pegasus(__lowercase , __lowercase ) torch_model.save_pretrained(__lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') __SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() __SCREAMING_SNAKE_CASE : List[str] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
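The heart of the conversion is plain string rewriting over checkpoint keys. A self-contained illustration of the same mechanism as rename_state_dict_key, using a subset of the INIT_COMMON patterns above:

PATTERNS = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]  # subset of INIT_COMMON

def rename_key(key: str, patterns: list[tuple[str, str]]) -> str:
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

print(rename_key("pegasus/decoder/layer_0/self/kernel", PATTERNS))
# -> pegasus.decoder.layers.0.self.weight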
580
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def snake_case (__lowercase ) -> List[Any]: '''simple docstring''' _snake_case : int = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class lowercase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _lowerCamelCase = StableDiffusionLatentUpscalePipeline _lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } _lowerCamelCase = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} _lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _lowerCamelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowerCamelCase = frozenset([] ) _lowerCamelCase = True @property def UpperCamelCase ( self ): _snake_case : Optional[int] = 1 _snake_case : Any = 4 _snake_case : int = (16, 16) _snake_case : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ ) return image def UpperCamelCase ( self ): torch.manual_seed(0 ) _snake_case : Tuple = UNetaDConditionModel( act_fn="gelu" , attention_head_dim=8 , norm_num_groups=lowercase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ) , in_channels=8 , mid_block_type=lowercase_ , only_cross_attention=lowercase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , ) _snake_case : Any = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) _snake_case : Union[str, Any] = EulerDiscreteScheduler(prediction_type="sample" ) _snake_case : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="quick_gelu" , projection_dim=512 , ) _snake_case : Optional[int] = CLIPTextModel(lowercase_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : int = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return 
components def UpperCamelCase ( self , lowercase_ , lowercase_=0 ): if str(lowercase_ ).startswith("mps" ): _snake_case : Dict = torch.manual_seed(lowercase_ ) else: _snake_case : Tuple = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) _snake_case : Optional[int] = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def UpperCamelCase ( self ): _snake_case : Tuple = "cpu" _snake_case : List[Any] = self.get_dummy_components() _snake_case : List[str] = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _snake_case : Union[str, Any] = self.get_dummy_inputs(lowercase_ ) _snake_case : Dict = pipe(**lowercase_ ).images _snake_case : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) _snake_case : Union[str, Any] = np.array( [0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] ) _snake_case : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_ , 1e-3 ) def UpperCamelCase ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def UpperCamelCase ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def UpperCamelCase ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def UpperCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def UpperCamelCase ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def UpperCamelCase ( self ): super().test_save_load_local(expected_max_difference=3e-3 ) def UpperCamelCase ( self ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def UpperCamelCase ( self ): _snake_case : Dict = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] _snake_case : Dict = self.get_dummy_components() _snake_case : Optional[Any] = self.pipeline_class(**lowercase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _snake_case : List[Any] = self.get_dummy_inputs(lowercase_ ) _snake_case : int = 2 _snake_case : List[str] = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue _snake_case : Union[str, Any] = getattr(lowercase_ , scheduler_enum.name ) _snake_case : Dict = scheduler_cls.from_config(pipe.scheduler.config ) _snake_case : Optional[int] = pipe(**lowercase_ )[0] outputs.append(lowercase_ ) assert check_same_shape(lowercase_ ) @require_torch_gpu @slow class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ): _snake_case : Any = torch.manual_seed(33 ) _snake_case : Optional[Any] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa ) pipe.to("cuda" ) _snake_case : int = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) _snake_case : int = "a photo of an astronaut high resolution, unreal 
engine, ultra realistic" _snake_case : Union[str, Any] = pipe(lowercase_ , generator=lowercase_ , output_type="latent" ).images _snake_case : Dict = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="np" , ).images[0] _snake_case : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean() ) < 5e-2 def UpperCamelCase ( self ): _snake_case : Dict = torch.manual_seed(33 ) _snake_case : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa ) upscaler.to("cuda" ) _snake_case : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" _snake_case : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) _snake_case : Any = upscaler( prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="np" , ).images[0] _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max() ) < 5e-2
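Condensed, the two-stage flow the slow test runs is: the base Stable Diffusion pipeline emits latents, and the x2 latent upscaler decodes them at double resolution. Reading the mangled torch.floataa as torch.float16 is an assumption; a CUDA device is required for these checkpoints at half precision.

import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16  # dtype assumed from the test
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
latents = pipe(prompt, output_type="latent").images                   # stay in latent space
image = upscaler(prompt=prompt, image=latents, num_inference_steps=20,
                 guidance_scale=0).images[0]                          # decode at 2x resolution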
580
1
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky series."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
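Two quick checks of the function above: the digits match the reference expansion of pi, and the output length follows from the Decimal precision minus the trimmed guard digit.

# Each series term contributes roughly 14 digits, hence ceil(precision / 14) iterations.
assert pi(30).startswith("3.14159265358979323846")  # reference digits of pi
assert len(pi(100)) == 100  # "3." plus 98 decimals once the guard digit is trimmed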
246
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _lowerCAmelCase : Dict = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) _lowerCAmelCase : List[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) _lowerCAmelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) _lowerCAmelCase : List[Any] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) _lowerCAmelCase : List[Any] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]), ('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) _lowerCAmelCase : List[str] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), 
('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) _lowerCAmelCase : Optional[Any] = ( ('JH AH TH KH QH', 2_3), ('JH 9H TH KH QH', 2_2), ('JC KH JS JD JH', 2_1), ('KH KC 3S 3H 3D', 2_0), ('8C 9C 5C 3C TC', 1_9), ('JS QS 9H TS KH', 1_8), ('7C 7S KH 2H 7H', 1_7), ('3C KH 5D 5S KH', 1_6), ('QH 8H KD JH 8S', 1_5), ('2D 6D 9D TH 7D', 1_4), ) def a_ ( ) -> int: """simple docstring""" lowerCamelCase , lowerCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) lowerCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] lowerCamelCase , lowerCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def a_ ( UpperCamelCase_ : int = 1_0_0 ) -> int: """simple docstring""" return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize('hand, expected' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] ) -> Dict: """simple docstring""" assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> List[Any]: """simple docstring""" assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> str: """simple docstring""" assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ) -> Union[str, Any]: """simple docstring""" assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , UpperCamelCase_ ) def a_ ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] ) -> Union[str, Any]: """simple docstring""" assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ) -> Optional[Any]: """simple docstring""" assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] lowerCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) lowerCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand == poker_hands[index] def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def a_ ( ) -> int: """simple docstring""" lowerCamelCase = PokerHand('2C 4S AS 3D 5C' ) lowerCamelCase = True lowerCamelCase = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def a_ ( ) -> Optional[int]: """simple 
docstring""" lowerCamelCase = 0 lowerCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) lowerCamelCase = os.path.join(UpperCamelCase_ , 'poker_hands.txt' ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: lowerCamelCase = line[:1_4].strip() lowerCamelCase = line[1_5:].strip() lowerCamelCase , lowerCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) lowerCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 3_7_6
246
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
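The lazy wiring above means the torch-backed classes are only imported on first attribute access, which keeps importing the parent package cheap. A hedged usage sketch; the checkpoint name is an assumption, not something this file pins down.

from transformers import InstructBlipProcessor  # resolves through the lazy module

processor = InstructBlipProcessor.from_pretrained(
    "Salesforce/instructblip-vicuna-7b"  # assumed checkpoint name
)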
570
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
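The property being tested, as a standalone snippet: an optimizer wrapped by accelerator.prepare() must survive a pickle round trip.

import pickle

import torch
from accelerate import Accelerator

opt = Accelerator().prepare(torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), lr=0.1))
pickle.loads(pickle.dumps(opt))  # raises if the AcceleratedOptimizer wrapper is not picklable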
570
1
"""simple docstring""" import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class __UpperCamelCase ( unittest.TestCase ): def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Any=2 , UpperCAmelCase : Any=56 , UpperCAmelCase : Dict=True , UpperCAmelCase : str=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : int=32 , UpperCAmelCase : Any=2 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : Optional[Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]=0.0_2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]="block_sparse" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[Any]=3 , ) -> int: lowerCAmelCase :Optional[int] = parent lowerCAmelCase :Dict = batch_size lowerCAmelCase :Any = seq_length lowerCAmelCase :Optional[int] = is_training lowerCAmelCase :List[str] = use_attention_mask lowerCAmelCase :Dict = use_token_type_ids lowerCAmelCase :int = use_labels lowerCAmelCase :Optional[int] = vocab_size lowerCAmelCase :Any = hidden_size lowerCAmelCase :Dict = num_hidden_layers lowerCAmelCase :List[Any] = num_attention_heads lowerCAmelCase :Optional[Any] = intermediate_size lowerCAmelCase :int = hidden_act lowerCAmelCase :Optional[Any] = hidden_dropout_prob lowerCAmelCase :Optional[Any] = attention_probs_dropout_prob lowerCAmelCase :int = max_position_embeddings lowerCAmelCase :Optional[Any] = type_vocab_size lowerCAmelCase :Any = type_sequence_label_size lowerCAmelCase :Tuple = initializer_range lowerCAmelCase :Optional[int] = num_choices lowerCAmelCase :Optional[int] = rescale_embeddings lowerCAmelCase :Any = attention_type lowerCAmelCase :str = use_bias lowerCAmelCase :Any = block_size lowerCAmelCase :str = num_random_blocks def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: lowerCAmelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase :Tuple = None if self.use_attention_mask: lowerCAmelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase :Optional[int] = None if self.use_token_type_ids: lowerCAmelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase :Dict = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , 
attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: lowerCAmelCase :Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :List[str] = config_and_inputs lowerCAmelCase :Any = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, } return config, inputs_dict @require_flax class __UpperCamelCase ( UpperCamelCase , unittest.TestCase ): lowercase_ : List[Any] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) lowercase_ : Optional[int] = False lowercase_ : Tuple = False def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]: lowerCAmelCase :List[Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase__ ( self : Any ) -> Tuple: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase__ ( self : str ) -> Tuple: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: super().test_hidden_states_output() @slow def UpperCAmelCase__ ( self : Tuple ) -> Dict: for model_class_name in self.all_model_classes: lowerCAmelCase :Dict = model_class_name.from_pretrained('google/bigbird-roberta-base' ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase__ ( self : Optional[int] ) -> Any: lowerCAmelCase , lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase :int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase :Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase : str , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[int] ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest('JIT Enabled' ): lowerCAmelCase :Optional[int] = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): lowerCAmelCase :int = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str=1e-5 , UpperCAmelCase : Tuple="outputs" , UpperCAmelCase : Any=None ) 
-> Optional[int]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith('outputs.attentions' ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
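Stripped of the harness, the jitted-forward pattern the suite checks looks like this; the checkpoint is the one named in the slow test, while the toy input shape is an arbitrary choice.

import jax
import jax.numpy as jnp
from transformers import FlaxBigBirdModel

model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")
input_ids = jnp.ones((1, 512), dtype="i4")  # arbitrary toy input

@jax.jit
def forward(input_ids):
    return model(input_ids).last_hidden_state

print(forward(input_ids).shape)  # (1, 512, 768) for this checkpoint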
553
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def UpperCAmelCase ( a__ , a__ , a__ , a__ ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def UpperCAmelCase ( a__ , a__ , a__ , a__ , a__=True ): '''simple docstring''' model.train() lowerCAmelCase :Dict = model(a__ ) lowerCAmelCase :Union[str, Any] = F.mse_loss(a__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(a__ ) def UpperCAmelCase ( a__ , a__=False ): '''simple docstring''' set_seed(42 ) lowerCAmelCase :Any = RegressionModel() lowerCAmelCase :List[str] = deepcopy(a__ ) lowerCAmelCase :str = RegressionDataset(length=80 ) lowerCAmelCase :Optional[Any] = DataLoader(a__ , batch_size=16 ) model.to(accelerator.device ) if sched: lowerCAmelCase :List[Any] = AdamW(params=model.parameters() , lr=1e-3 ) lowerCAmelCase :Dict = AdamW(params=ddp_model.parameters() , lr=1e-3 ) lowerCAmelCase :Tuple = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) lowerCAmelCase :Optional[Any] = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) # Make a copy of `model` if sched: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :int = accelerator.prepare(a__ , a__ , a__ , a__ ) else: lowerCAmelCase , lowerCAmelCase :Optional[int] = accelerator.prepare(a__ , a__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def UpperCAmelCase ( a__ ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Any = get_training_setup(a__ ) # Use a single batch lowerCAmelCase , lowerCAmelCase :Any = next(iter(a__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(a__ , a__ , a__ , a__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"""Gradients not in sync when 
they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :Any = ddp_input[torch.randperm(len(a__ ) )] def UpperCAmelCase ( a__ ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Dict = get_training_setup(a__ ) # Use a single batch lowerCAmelCase , lowerCAmelCase :str = next(iter(a__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :List[Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :str = ddp_input[torch.randperm(len(a__ ) )] def UpperCAmelCase ( a__=False , a__=False ): '''simple docstring''' lowerCAmelCase :str = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Optional[Any] = get_training_setup(a__ ) for iteration, batch in enumerate(a__ ): lowerCAmelCase , lowerCAmelCase :Union[str, Any] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Dict = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Any = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(a__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :Optional[int] = ddp_input[torch.randperm(len(a__ ) )] GradientState._reset_state() def UpperCAmelCase ( a__=False , 
a__=False ): '''simple docstring''' lowerCAmelCase :Optional[int] = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Union[str, Any] = get_training_setup(a__ , a__ ) for iteration, batch in enumerate(a__ ): lowerCAmelCase , lowerCAmelCase :Optional[int] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(a__ , a__ , a__ , a__ , a__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" lowerCAmelCase :int = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a__ )) if accelerator.num_processes > 1: check_model_parameters(a__ , a__ , a__ , a__ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def UpperCAmelCase ( ): '''simple docstring''' lowerCAmelCase :int = Accelerator() lowerCAmelCase :int = RegressionDataset(length=80 ) lowerCAmelCase :Optional[Any] = DataLoader(a__ , batch_size=16 ) lowerCAmelCase :Any = RegressionDataset(length=96 ) lowerCAmelCase :Dict = DataLoader(a__ , batch_size=16 ) lowerCAmelCase , lowerCAmelCase :Tuple = accelerator.prepare(a__ , a__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if iteration < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if batch_num < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def UpperCAmelCase ( ): '''simple docstring''' lowerCAmelCase :List[str] = Accelerator() lowerCAmelCase :Optional[int] = accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) test_noop_sync(a__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(a__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: 
print( '**Test `accumulate` gradient accumulation, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(a__ , a__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(a__ , a__ ) def UpperCAmelCase ( a__ ): '''simple docstring''' main() if __name__ == "__main__": main()
553
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __A( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , ) return model @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.dummy_uncond_unet UpperCamelCase__ = DDIMScheduler() UpperCamelCase__ = self.dummy_vq_model UpperCamelCase__ = LDMPipeline(unet=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""numpy""" ).images UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = image[0, -3:, -3:, -1] UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) UpperCamelCase__ = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type="""numpy""" ).images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCamelCase__ = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] ) UpperCamelCase__ = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
86
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
86
1
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow _snake_case : Optional[Any] = False class a (unittest.TestCase ): """simple docstring""" def __snake_case ( self : int , lowerCamelCase : Optional[Any]=32 ) -> Dict: set_seed(0 ) __snake_case : int = UNetaDModel(sample_size=lowerCamelCase , in_channels=3 , out_channels=3 ) __snake_case : List[Any] = torch.optim.SGD(model.parameters() , lr=0.00_01 ) return model, optimizer @slow def __snake_case ( self : str ) -> List[Any]: __snake_case : int = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable __snake_case : Optional[Any] = DDPMScheduler( num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , ) __snake_case : Tuple = DDIMScheduler( num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) __snake_case : Tuple = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase ) for _ in range(4 )] __snake_case : str = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase ) for _ in range(4 )] __snake_case : List[Any] = [torch.randint(0 , 1000 , (4,) ).long().to(lowerCamelCase ) for _ in range(4 )] # train with a DDPM scheduler __snake_case , __snake_case : str = self.get_model_optimizer(resolution=32 ) model.train().to(lowerCamelCase ) for i in range(4 ): optimizer.zero_grad() __snake_case : Optional[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case : List[str] = model(lowerCamelCase , timesteps[i] ).sample __snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM __snake_case , __snake_case : Optional[int] = self.get_model_optimizer(resolution=32 ) model.train().to(lowerCamelCase ) for i in range(4 ): optimizer.zero_grad() __snake_case : str = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case : Any = model(lowerCamelCase , timesteps[i] ).sample __snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) ) self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
81
from typing import Dict, Optional import numpy as np import datasets __A : Any = ''' IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. ''' __A : int = ''' Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric("mean_iou") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} ''' __A : Tuple = '''\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }''' def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False, ) -> List[str]: '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): lowerCAmelCase : Union[str, Any] = new_id # turn into Numpy arrays lowerCAmelCase : int = np.array(_UpperCAmelCase ) lowerCAmelCase : Union[str, Any] = np.array(_UpperCAmelCase ) if reduce_labels: lowerCAmelCase : Union[str, Any] = 255 lowerCAmelCase : Any = label - 1 lowerCAmelCase : Tuple = 255 lowerCAmelCase : Any = label != ignore_index lowerCAmelCase : Dict = np.not_equal(_UpperCAmelCase, _UpperCAmelCase ) lowerCAmelCase : Optional[int] = pred_label[mask] lowerCAmelCase : Optional[int] = np.array(_UpperCAmelCase )[mask] lowerCAmelCase : str = pred_label[pred_label == label] lowerCAmelCase : Optional[Any] = np.histogram(_UpperCAmelCase, bins=_UpperCAmelCase, range=(0, num_labels - 1) )[0] lowerCAmelCase : str = np.histogram(_UpperCAmelCase, bins=_UpperCAmelCase, range=(0, num_labels - 1) )[0] lowerCAmelCase : Optional[Any] = np.histogram(_UpperCAmelCase, bins=_UpperCAmelCase, range=(0, num_labels - 1) )[0] lowerCAmelCase : List[str] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False, ) -> Any: '''simple docstring''' lowerCAmelCase : Optional[Any] = np.zeros((num_labels,), dtype=np.floataa ) lowerCAmelCase : Dict = np.zeros((num_labels,), dtype=np.floataa ) lowerCAmelCase : int = np.zeros((num_labels,), dtype=np.floataa ) lowerCAmelCase : int = np.zeros((num_labels,), dtype=np.floataa ) for result, gt_seg_map in zip(_UpperCAmelCase, _UpperCAmelCase ): lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = intersect_and_union( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, ) -> Any: '''simple docstring''' lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = total_intersect_and_union( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # compute metrics lowerCAmelCase : str = {} lowerCAmelCase : Dict = total_area_intersect.sum() / total_area_label.sum() lowerCAmelCase : Optional[Any] = total_area_intersect / total_area_union lowerCAmelCase : Optional[int] = total_area_intersect / total_area_label lowerCAmelCase : Union[str, Any] = np.nanmean(_UpperCAmelCase ) lowerCAmelCase : Any = np.nanmean(_UpperCAmelCase ) lowerCAmelCase : Dict = all_acc lowerCAmelCase : Tuple = iou lowerCAmelCase : Optional[int] = acc if nan_to_num is not None: lowerCAmelCase : int = {metric: 
np.nan_to_num(_UpperCAmelCase, nan=_UpperCAmelCase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def lowercase__ ( self : str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), } ) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def lowercase__ ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ): lowerCAmelCase : Optional[int] = mean_iou( results=UpperCAmelCase_ , gt_seg_maps=UpperCAmelCase_ , num_labels=UpperCAmelCase_ , ignore_index=UpperCAmelCase_ , nan_to_num=UpperCAmelCase_ , label_map=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ , ) return iou_result
343
0
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end against support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Encode the query and the support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            # Support embeddings at the entity start/end marker positions
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            # Score every query token against the support start/end embeddings
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
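# A minimal usage sketch for the model above. How `W_query` and `W_supports`
# are built (batched tokenizer outputs carrying "sizes", "start_token_id" and
# "end_token_id" entries) is an assumption based on the upstream FSNER project,
# not something this file defines:
#
#   model = FSNERModel("sayef/fsner-bert-base-uncased")
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end scores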
707
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case string to camelCase (or PascalCase if indicated).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
375
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Dict = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class UpperCamelCase__ ( __lowercase ): """simple docstring""" __magic_name__ = "switch_transformers" __magic_name__ = ["past_key_values"] __magic_name__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self , snake_case__=3_2128 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=3 , snake_case__=12 , snake_case__=3 , snake_case__=12 , snake_case__=8 , snake_case__=False , snake_case__=0.01 , snake_case__="float32" , snake_case__=False , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1E-6 , snake_case__=0.001 , snake_case__=0.001 , snake_case__=1.0 , snake_case__="relu" , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=0 , snake_case__=1 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : str = vocab_size _lowerCAmelCase : Tuple = d_model _lowerCAmelCase : Dict = d_kv _lowerCAmelCase : str = d_ff _lowerCAmelCase : int = num_sparse_encoder_layers _lowerCAmelCase : Dict = num_layers _lowerCAmelCase : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _lowerCAmelCase : Dict = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: _lowerCAmelCase : int = self.num_layers // self.num_sparse_encoder_layers else: _lowerCAmelCase : Optional[int] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: _lowerCAmelCase : Optional[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: _lowerCAmelCase : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers _lowerCAmelCase : Any = num_heads _lowerCAmelCase : List[Any] = num_experts _lowerCAmelCase : List[str] = expert_capacity _lowerCAmelCase : List[str] = router_bias _lowerCAmelCase : Optional[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' ) _lowerCAmelCase : List[str] = router_dtype _lowerCAmelCase : Any = router_ignore_padding_tokens _lowerCAmelCase : Optional[Any] = relative_attention_num_buckets _lowerCAmelCase : Optional[int] = relative_attention_max_distance _lowerCAmelCase : List[Any] = dropout_rate _lowerCAmelCase : Optional[int] = layer_norm_epsilon _lowerCAmelCase : Union[str, Any] = initializer_factor _lowerCAmelCase : int = feed_forward_proj _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Optional[int] = add_router_probs _lowerCAmelCase : Optional[int] = router_z_loss_coef _lowerCAmelCase : List[str] = router_aux_loss_coef _lowerCAmelCase : Union[str, Any] = self.feed_forward_proj.split('-' ) _lowerCAmelCase : int = act_info[-1] _lowerCAmelCase : int = act_info[0] == '''gated''' if len(_A ) > 1 and act_info[0] != "gated" or len(_A ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _lowerCAmelCase : Optional[Any] = '''gelu_new''' super().__init__( pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , **_A , )
444
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
485
0
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a version object) to a requirement using the given operation."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version to a reference version with an operation."""
    return compare_versions(torch_version, operation, version)
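# Illustrative calls for the helpers above; the version strings are hypothetical
# and the results depend on the locally installed packages:
#
#   compare_versions("torch", ">=", "1.12")  # True if the installed torch is >= 1.12
#   is_torch_version("<", "2.0")             # True if the installed torch is < 2.0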
126
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):  # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):  # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
126
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ ( __a ): """simple docstring""" lowerCAmelCase : Dict = '''mobilenet_v2''' def __init__( self : List[str] , _lowercase : List[str]=3 , _lowercase : Union[str, Any]=224 , _lowercase : Dict=1.0 , _lowercase : List[Any]=8 , _lowercase : Optional[int]=8 , _lowercase : List[Any]=6 , _lowercase : int=32 , _lowercase : List[Any]=True , _lowercase : List[Any]=True , _lowercase : Optional[Any]="relu6" , _lowercase : Tuple=True , _lowercase : Optional[int]=0.8 , _lowercase : List[str]=0.02 , _lowercase : List[Any]=0.001 , _lowercase : Dict=255 , **_lowercase : Union[str, Any] , ): """simple docstring""" super().__init__(**_lowercase ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) _UpperCamelCase: List[str] = num_channels _UpperCamelCase: Optional[Any] = image_size _UpperCamelCase: List[str] = depth_multiplier _UpperCamelCase: List[str] = depth_divisible_by _UpperCamelCase: Optional[Any] = min_depth _UpperCamelCase: str = expand_ratio _UpperCamelCase: Union[str, Any] = output_stride _UpperCamelCase: str = first_layer_is_expansion _UpperCamelCase: Any = finegrained_output _UpperCamelCase: Optional[int] = hidden_act _UpperCamelCase: int = tf_padding _UpperCamelCase: Any = classifier_dropout_prob _UpperCamelCase: Optional[int] = initializer_range _UpperCamelCase: Optional[int] = layer_norm_eps _UpperCamelCase: int = semantic_loss_ignore_index class __magic_name__ ( __a ): """simple docstring""" lowerCAmelCase : List[str] = version.parse('''1.11''' ) @property def lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def lowerCAmelCase ( self : Optional[int] ): """simple docstring""" if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return 1E-4
271
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Attempt to break a Caesar cipher: score every possible shift with the
    chi-squared statistic against English letter frequencies and return the
    best (shift, chi-squared value, decoded message) triple.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
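# Illustrative call for the helper above (the ciphertext is a stand-in; the
# returned chi-squared value depends on the message):
#
#   shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("uryyb jbeyq")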
271
1
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase): snake_case__ = StableDiffusionDiffEditPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} snake_case__ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case__ = frozenset([]) def _UpperCamelCase ( self : Tuple ) -> List[Any]: torch.manual_seed(0 ) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , ) _UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , ) _UpperCamelCase = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCamelCase , set_alpha_to_zero=__UpperCamelCase , ) torch.manual_seed(0 ) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) _UpperCamelCase = CLIPTextModel(__UpperCamelCase ) _UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _UpperCamelCase ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : str=0 ) -> str: _UpperCamelCase = floats_tensor((1, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCamelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCamelCase = torch.Generator(device=__UpperCamelCase 
).manual_seed(__UpperCamelCase ) _UpperCamelCase = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str]=0 ) -> Any: _UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCamelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('''RGB''' ) if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCamelCase = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple=0 ) -> Any: _UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCamelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('''RGB''' ) if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCamelCase = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple: if not hasattr(self.pipeline_class , '''_optional_components''' ): return _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = self.pipeline_class(**__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = pipe(**__UpperCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__UpperCamelCase ) _UpperCamelCase = self.pipeline_class.from_pretrained(__UpperCamelCase ) pipe_loaded.to(__UpperCamelCase ) pipe_loaded.set_progress_bar_config(disable=__UpperCamelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(__UpperCamelCase , __UpperCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = pipe_loaded(**__UpperCamelCase )[0] _UpperCamelCase = np.abs(output - output_loaded ).max() self.assertLess(__UpperCamelCase , 1E-4 ) def _UpperCamelCase ( self : Any ) -> Any: _UpperCamelCase = '''cpu''' _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = 
self.pipeline_class(**__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_mask_inputs(__UpperCamelCase ) _UpperCamelCase = pipe.generate_mask(**__UpperCamelCase ) _UpperCamelCase = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) _UpperCamelCase = np.array([0] * 9 ) _UpperCamelCase = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCamelCase , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: _UpperCamelCase = '''cpu''' _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = self.pipeline_class(**__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inversion_inputs(__UpperCamelCase ) _UpperCamelCase = pipe.invert(**__UpperCamelCase ).images _UpperCamelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) _UpperCamelCase = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) _UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCamelCase , 1E-3 ) def _UpperCamelCase ( self : Dict ) -> Any: super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]: _UpperCamelCase = '''cpu''' _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = {'''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''beta_schedule''': '''scaled_linear'''} _UpperCamelCase = DPMSolverMultistepScheduler(**__UpperCamelCase ) _UpperCamelCase = DPMSolverMultistepInverseScheduler(**__UpperCamelCase ) _UpperCamelCase = self.pipeline_class(**__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inversion_inputs(__UpperCamelCase ) _UpperCamelCase = pipe.invert(**__UpperCamelCase ).images _UpperCamelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) _UpperCamelCase = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) _UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCamelCase , 1E-3 ) @require_torch_gpu @slow class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : Optional[Any] ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _UpperCamelCase ( cls : Union[str, Any] ) -> str: _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) _UpperCamelCase = raw_image.convert('''RGB''' ).resize((768, 768) ) _UpperCamelCase = raw_image def _UpperCamelCase ( self : Dict ) -> Tuple: _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCamelCase = DDIMScheduler.from_config(pipe.scheduler.config ) _UpperCamelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = '''a bowl of fruit''' _UpperCamelCase = '''a bowl of pears''' _UpperCamelCase = pipe.generate_mask( image=self.raw_image , source_prompt=__UpperCamelCase , 
target_prompt=__UpperCamelCase , generator=__UpperCamelCase , ) _UpperCamelCase = pipe.invert( prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase ).latents _UpperCamelCase = pipe( prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] _UpperCamelCase = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def _UpperCamelCase ( self : List[str] ) -> Dict: _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _UpperCamelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = '''a bowl of fruit''' _UpperCamelCase = '''a bowl of pears''' _UpperCamelCase = pipe.generate_mask( image=self.raw_image , source_prompt=__UpperCamelCase , target_prompt=__UpperCamelCase , generator=__UpperCamelCase , ) _UpperCamelCase = pipe.invert( prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase , num_inference_steps=25 , ).latents _UpperCamelCase = pipe( prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] _UpperCamelCase = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
709
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_lowercase): snake_case__ = ['''torch''', '''scipy'''] def __init__( self : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Any ) -> Any: requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[Any] ) -> str: requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def _UpperCamelCase ( cls : str , *__UpperCamelCase : Any , **__UpperCamelCase : int ) -> int: requires_backends(cls , ['''torch''', '''scipy'''] )
342
0
import argparse from collections import defaultdict import yaml UpperCAmelCase_ : List[str] = '''docs/source/en/_toctree.yml''' def __SCREAMING_SNAKE_CASE ( a__ : int ) -> Union[str, Any]: __A : Union[str, Any] = defaultdict(a__ ) __A : Dict = [] __A : Dict = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(a__ ) __A : Optional[int] = new_doc_list __A : Optional[int] = [key for key, value in counts.items() if value > 1] __A : Tuple = [] for duplicate_key in duplicates: __A : Union[str, Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(a__ ) > 1: raise ValueError( f"""{duplicate_key} is present several times in the documentation table of content at """ """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) __A : str = sorted(a__ ,key=lambda a__ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(a__ ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(a__ ) # Sort return overview_doc def __SCREAMING_SNAKE_CASE ( a__ : Tuple=False ) -> List[str]: with open(a__ ,encoding="""utf-8""" ) as f: __A : Optional[Any] = yaml.safe_load(f.read() ) # Get to the API doc __A : List[str] = 0 while content[api_idx]["title"] != "API": api_idx += 1 __A : Tuple = content[api_idx]["""sections"""] # Then to the model doc __A : List[Any] = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __A : List[Any] = api_doc[scheduler_idx]["""sections"""] __A : Tuple = clean_doc_toc(a__ ) __A : Tuple = False if new_scheduler_doc != scheduler_doc: __A : List[str] = True if overwrite: __A : List[str] = new_scheduler_doc if diff: if overwrite: __A : Optional[int] = api_doc with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(yaml.dump(a__ ,allow_unicode=a__ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __SCREAMING_SNAKE_CASE ( a__ : Any=False ) -> Any: with open(a__ ,encoding="""utf-8""" ) as f: __A : Union[str, Any] = yaml.safe_load(f.read() ) # Get to the API doc __A : Union[str, Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 __A : Dict = content[api_idx]["""sections"""] # Then to the model doc __A : Dict = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __A : Optional[Any] = False __A : List[str] = api_doc[pipeline_idx]["""sections"""] __A : Optional[int] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __A : List[Any] = pipeline_doc["""section"""] __A : int = clean_doc_toc(a__ ) if overwrite: __A : Dict = new_sub_pipeline_doc new_pipeline_docs.append(a__ ) # sort overall pipeline doc __A : Tuple = clean_doc_toc(a__ ) if new_pipeline_docs != pipeline_docs: __A : List[str] = True if overwrite: __A : List[Any] = new_pipeline_docs if diff: if overwrite: __A : Optional[Any] = api_doc with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(yaml.dump(a__ ,allow_unicode=a__ ) ) else: raise ValueError( """The model doc part of the table 
of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCAmelCase_ : Tuple = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
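The sorting logic above reduces to: deduplicate entries on their `local` key, then order by lowercased title with the overview entry pinned first. A toy sketch of that rule on hand-made entries (the entries are illustrative):

# Toy sketch of the ordering rule above: dedup on "local", then sort
# case-insensitively by title while keeping the overview entry first.
docs = [
    {"local": "ddpm", "title": "DDPM"},
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddpm", "title": "DDPM"},  # duplicate to be merged away
]

deduped, seen = [], set()
for doc in docs:
    if doc["local"] not in seen:
        seen.add(doc["local"])
        deduped.append(doc)

overview = [doc for doc in deduped if doc["title"].lower() == "overview"]
rest = sorted(
    (doc for doc in deduped if doc["title"].lower() != "overview"),
    key=lambda doc: doc["title"].lower(),
)
print(overview + rest)
# [{'local': 'overview', 'title': 'Overview'}, {'local': 'ddim', 'title': 'DDIM'},
#  {'local': 'ddpm', 'title': 'DDPM'}]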
17
def UpperCamelCase(k: int, n: int) -> list[int]:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
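The function decodes `k` in the factorial number system: each `divmod` peels off one digit, which selects the next element among those still unused. A quick usage sketch of the function above:

# Usage sketch: the 3! = 6 permutations of range(3) correspond to k = 0..5.
for k in range(6):
    print(k, UpperCamelCase(k, 3))
# 0 [0, 1, 2]
# 1 [0, 2, 1]
# 2 [1, 0, 2]
# 3 [1, 2, 0]
# 4 [2, 0, 1]
# 5 [2, 1, 0]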
40
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ : Dict = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
444
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    # Strip the quotes around each comma-separated word
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    # A word counts when the sum of its letter values (A=1, ..., Z=26)
    # is a triangular number
    triangular_words = [
        value
        for value in [sum(ord(x) - 64 for x in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangular_words)


if __name__ == "__main__":
    print(solution())
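The word value is the sum of letter positions (`ord(x) - 64` maps A to 1), and a word counts when that value is triangular. A hand-check that is independent of `words.txt`:

# Hand-check of the rule above, independent of words.txt:
# "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is triangular.
triangular = {n * (n + 1) // 2 for n in range(1, 101)}
value = sum(ord(ch) - 64 for ch in "SKY")
print(value, value in triangular)  # 55 True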
444
1
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
556
import collections import importlib.util import os import re from pathlib import Path SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers' # Matches is_xxx_available() SCREAMING_SNAKE_CASE__ : List[Any] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} SCREAMING_SNAKE_CASE__ : str = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available SCREAMING_SNAKE_CASE__ : Dict = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo SCREAMING_SNAKE_CASE__ : List[Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(R'^\s*try:') # Catches a line with else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(R'^\s*else:') def a__ ( snake_case__ : Union[str, Any] ): if _re_test_backend.search(snake_case__ ) is None: return None _UpperCAmelCase : str = [b[0] for b in _re_backend.findall(snake_case__ )] backends.sort() return "_and_".join(snake_case__ ) def a__ ( snake_case__ : Optional[int] ): with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _UpperCAmelCase : Optional[Any] = f.readlines() _UpperCAmelCase : int = 0 while line_index < len(snake_case__ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(snake_case__ ): return None # First grab the objects without a specific backend in _import_structure _UpperCAmelCase : int = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: _UpperCAmelCase : int = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(snake_case__ ): _UpperCAmelCase : Optional[Any] = _re_one_line_import_struct.search(snake_case__ ).groups()[0] _UpperCAmelCase : str = re.findall("""\[([^\]]+)\]""" , snake_case__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue _UpperCAmelCase : int = _re_import_struct_key_value.search(snake_case__ ) if single_line_import_search is not None: _UpperCAmelCase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 _UpperCAmelCase : str = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. _UpperCAmelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCAmelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCAmelCase : List[Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): _UpperCAmelCase : Optional[int] = lines[line_index] if _re_import_struct_add_one.search(snake_case__ ) is not None: objects.append(_re_import_struct_add_one.search(snake_case__ ).groups()[0] ) elif _re_import_struct_add_many.search(snake_case__ ) is not None: _UpperCAmelCase : str = _re_import_struct_add_many.search(snake_case__ ).groups()[0].split(""", """ ) _UpperCAmelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_between_brackets.search(snake_case__ ) is not None: _UpperCAmelCase : str = _re_between_brackets.search(snake_case__ ).groups()[0].split(""", """ ) _UpperCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0] objects.extend(snake_case__ ) elif _re_quote_object.search(snake_case__ ) is not None: objects.append(_re_quote_object.search(snake_case__ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 _UpperCAmelCase : str = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _UpperCAmelCase : Optional[Any] = [] while ( line_index < len(snake_case__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): _UpperCAmelCase : Union[str, Any] = lines[line_index] _UpperCAmelCase : str = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 _UpperCAmelCase : int = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(snake_case__ ): # If the line is an if is_backend_available, we grab all objects associated. 
_UpperCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCAmelCase : Optional[int] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCAmelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): _UpperCAmelCase : Union[str, Any] = lines[line_index] _UpperCAmelCase : Any = _re_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 _UpperCAmelCase : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( snake_case__ : Any , snake_case__ : Optional[int] ): def find_duplicates(snake_case__ : Dict ): return [k for k, v in collections.Counter(snake_case__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _UpperCAmelCase : int = [] for key in import_dict_objects.keys(): _UpperCAmelCase : Any = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _UpperCAmelCase : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _UpperCAmelCase : Optional[int] = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): _UpperCAmelCase : Any = [] for root, _, files in os.walk(snake_case__ ): if "__init__.py" in files: _UpperCAmelCase : Optional[Any] = os.path.join(snake_case__ , """__init__.py""" ) _UpperCAmelCase : List[str] = parse_init(snake_case__ ) if objects is not None: _UpperCAmelCase : int = analyze_results(*snake_case__ ) if len(snake_case__ ) > 0: _UpperCAmelCase : Union[str, Any] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(snake_case__ ) ) if len(snake_case__ ) > 0: raise ValueError("""\n\n""".join(snake_case__ ) ) def a__ ( ): _UpperCAmelCase : Tuple = [] for path, directories, files in os.walk(snake_case__ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(snake_case__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(snake_case__ ) / folder).glob("""*.py""" ) ) ) == 0: continue _UpperCAmelCase : Dict = str((Path(snake_case__ ) / folder).relative_to(snake_case__ ) ) _UpperCAmelCase : Union[str, Any] = short_path.replace(os.path.sep , """.""" ) submodules.append(snake_case__ ) for fname in files: if fname == "__init__.py": continue _UpperCAmelCase : Optional[Any] = str((Path(snake_case__ ) / fname).relative_to(snake_case__ 
) ) _UpperCAmelCase : Optional[Any] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(snake_case__ ) return submodules SCREAMING_SNAKE_CASE__ : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): # This is to make sure the transformers module imported is the one in the repo. _UpperCAmelCase : int = importlib.util.spec_from_file_location( """transformers""" , os.path.join(snake_case__ , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) _UpperCAmelCase : Optional[int] = spec.loader.load_module() _UpperCAmelCase : int = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(snake_case__ ) > 0: _UpperCAmelCase : Dict = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
643
0
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } UpperCamelCase_ = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } UpperCamelCase_ = { "ctrl": 256, } UpperCamelCase_ = { "Pregnancy": 168629, "Christianity": 7675, "Explain": 106423, "Fitness": 63440, "Saving": 63163, "Ask": 27171, "Ass": 95985, "Joke": 163509, "Questions": 45622, "Thoughts": 49605, "Retail": 52342, "Feminism": 164338, "Writing": 11992, "Atheism": 192263, "Netflix": 48616, "Computing": 39639, "Opinion": 43213, "Alone": 44967, "Funny": 58917, "Gaming": 40358, "Human": 4088, "India": 1331, "Joker": 77138, "Diet": 36206, "Legal": 11859, "Norman": 4939, "Tip": 72689, "Weight": 52343, "Movies": 46273, "Running": 23425, "Science": 2090, "Horror": 37793, "Confession": 60572, "Finance": 12250, "Politics": 16360, "Scary": 191985, "Support": 12654, "Technologies": 32516, "Teenage": 66160, "Event": 32769, "Learned": 67460, "Notion": 182770, "Wikipedia": 37583, "Books": 6665, "Extract": 76050, "Confessions": 102701, "Conspiracy": 75932, "Links": 63674, "Narcissus": 150425, "Relationship": 54766, "Relationships": 134796, "Reviews": 41671, "News": 4256, "Translation": 26820, "multilingual": 128406, } def lowerCAmelCase__ ( a_ : List[Any] ) -> List[str]: UpperCAmelCase__ : int = set() UpperCAmelCase__ : str = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Optional[int] = set(a_ ) return pairs class __UpperCAmelCase ( UpperCamelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Optional[int] = CONTROL_CODES def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<unk>" , **_UpperCAmelCase ): super().__init__(unk_token=_UpperCAmelCase , **_UpperCAmelCase ) with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: UpperCAmelCase__ : Union[str, Any] = json.load(_UpperCAmelCase ) UpperCAmelCase__ : Dict = {v: k for k, v in self.encoder.items()} with open(_UpperCAmelCase , encoding='''utf-8''' ) as merges_handle: UpperCAmelCase__ : int = merges_handle.read().split('''\n''' )[1:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges] UpperCAmelCase__ : Optional[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ : int = {} @property def lowerCamelCase ( self ): return len(self.encoder ) def lowerCamelCase ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase ( self , _UpperCAmelCase ): if token in self.cache: return self.cache[token] UpperCAmelCase__ : Union[str, Any] = tuple(_UpperCAmelCase ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) UpperCAmelCase__ : Tuple = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: UpperCAmelCase__ : Tuple = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , 
UpperCAmelCase__ : str = bigram UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Any = 0 while i < len(_UpperCAmelCase ): try: UpperCAmelCase__ : Tuple = word.index(_UpperCAmelCase , _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : int = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Optional[int] = tuple(_UpperCAmelCase ) UpperCAmelCase__ : List[str] = new_word if len(_UpperCAmelCase ) == 1: break else: UpperCAmelCase__ : Union[str, Any] = get_pairs(_UpperCAmelCase ) UpperCAmelCase__ : List[str] = '''@@ '''.join(_UpperCAmelCase ) UpperCAmelCase__ : List[Any] = word[:-4] UpperCAmelCase__ : Optional[Any] = word return word def lowerCamelCase ( self , _UpperCAmelCase ): UpperCAmelCase__ : int = [] UpperCAmelCase__ : List[Any] = re.findall(R'''\S+\n?''' , _UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(''' ''' ) ) ) return split_tokens def lowerCamelCase ( self , _UpperCAmelCase ): return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase ( self , _UpperCAmelCase ): return self.decoder.get(_UpperCAmelCase , self.unk_token ) def lowerCamelCase ( self , _UpperCAmelCase ): UpperCAmelCase__ : Any = ''' '''.join(_UpperCAmelCase ).replace('''@@ ''' , '''''' ).strip() return out_string def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ : Optional[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : Union[str, Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '''\n''' ) UpperCAmelCase__ : str = 0 with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) UpperCAmelCase__ : Dict = token_index writer.write(''' '''.join(_UpperCAmelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
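The `bpe` method above repeatedly merges the adjacent symbol pair with the lowest merge rank until no ranked pair remains. The core loop, reduced to a self-contained sketch with a toy merge table (the ranks below are illustrative, not CTRL's real merges file):

# Self-contained sketch of the BPE merge loop above, using a toy merge table.
def get_pairs(word):
    # All adjacent symbol pairs in a tuple of symbols.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
word = ("l", "o", "w", "e", "r</w>")

while True:
    candidates = [pair for pair in get_pairs(word) if pair in bpe_ranks]
    if not candidates:
        break
    first, second = min(candidates, key=lambda pair: bpe_ranks[pair])
    merged, i = [], 0
    while i < len(word):
        # Merge every non-overlapping occurrence of the chosen pair.
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

print(word)  # ('low', 'er</w>')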
599
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ): UpperCAmelCase__ : Any = size if size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase__ : str = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : Tuple = image_size UpperCAmelCase__ : List[str] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : Dict = size UpperCAmelCase__ : Tuple = do_normalize UpperCAmelCase__ : List[str] = image_mean UpperCAmelCase__ : List[Any] = image_std def lowerCamelCase ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = DPTImageProcessor if is_vision_available() else None def lowerCamelCase ( self ): UpperCAmelCase__ : Optional[int] = DPTImageProcessingTester(self ) @property def lowerCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self ): UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) ) def lowerCamelCase ( self ): UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def lowerCamelCase ( self ): # Initialize image_processing UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowerCamelCase ( self ): # Initialize image_processing UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase__ : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowerCamelCase ( self ): # Initialize image_processing UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase__ : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
599
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class _lowercase(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.0_1,
        proj_init_std=0.0_1,
        init_std=0.0_2,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.'
        )
56
'''simple docstring'''
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def _a(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
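A usage sketch of the helper above; the repo id, file path, and printed URL are illustrative:

# Usage sketch; repo id, file path, and the printed URL are illustrative.
url = _a("user/my-dataset", "data/train file.csv", revision="main")
print(url)
# e.g. https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train%20file.csv
# (on huggingface_hub >= 0.11.0 the quoting happens inside hf_hub_url itself)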
56
1
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class snake_case ( UpperCAmelCase ): @slow @require_torch def lowerCamelCase__ ( self : str ): '''simple docstring''' a : Optional[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) a : List[str] = BertTokenizer.from_pretrained('bert-base-uncased' ) a : int = bertabert.config.encoder.vocab_size a : List[str] = tokenizer.sep_token_id a : Optional[Any] = tokenizer.cls_token_id a : Optional[Any] = 1_2_8 a : Any = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) a : Optional[Any] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) a : Dict = train_dataset.select(range(3_2 ) ) a : Any = val_dataset.select(range(1_6 ) ) a : int = 4 def _map_to_encoder_decoder_inputs(A : Optional[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] a : Dict = tokenizer(batch['article'] , padding='max_length' , truncation=A , max_length=5_1_2 ) a : Optional[int] = tokenizer(batch['highlights'] , padding='max_length' , truncation=A , max_length=1_2_8 ) a : int = inputs.input_ids a : Any = inputs.attention_mask a : Any = outputs.input_ids a : Optional[Any] = outputs.input_ids.copy() a : List[Any] = [ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] a : Dict = outputs.attention_mask assert all(len(A ) == 5_1_2 for x in inputs.input_ids ) assert all(len(A ) == 1_2_8 for x in outputs.input_ids ) return batch def _compute_metrics(A : Optional[Any] ): a : Optional[int] = pred.label_ids a : List[str] = pred.predictions # all unnecessary tokens are removed a : Union[str, Any] = tokenizer.batch_decode(A , skip_special_tokens=A ) a : Dict = tokenizer.batch_decode(A , skip_special_tokens=A ) a : Union[str, Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A ) )] ) / len(A ) return {"accuracy": accuracy} # map train dataset a : Dict = train_dataset.map( _map_to_encoder_decoder_inputs , batched=A , batch_size=A , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset a : Any = val_dataset.map( _map_to_encoder_decoder_inputs , batched=A , batch_size=A , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) a : Tuple = self.get_auto_remove_tmp_dir() a : str = SeqaSeqTrainingArguments( output_dir=A , per_device_train_batch_size=A , per_device_eval_batch_size=A , predict_with_generate=A , evaluation_strategy='steps' , do_train=A , do_eval=A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer a : Dict = SeqaSeqTrainer( model=A , args=A , compute_metrics=_compute_metrics , train_dataset=A , eval_dataset=A , tokenizer=A , ) # start training trainer.train()
118
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class snake_case ( UpperCAmelCase ): __magic_name__ = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) __magic_name__ = '''CIDAS/clipseg-rd64-refined''' __magic_name__ = '''image_segmenter''' __magic_name__ = CLIPSegForImageSegmentation __magic_name__ = ['''image''', '''text'''] __magic_name__ = ['''image'''] def __init__( self : str , *A : Any , **A : Any ): '''simple docstring''' requires_backends(self , ['vision'] ) super().__init__(*A , **A ) def lowerCamelCase__ ( self : Any , A : "Image" , A : str ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=A , return_tensors='pt' ) def lowerCamelCase__ ( self : str , A : List[Any] ): '''simple docstring''' with torch.no_grad(): a : Optional[Any] = self.model(**A ).logits return logits def lowerCamelCase__ ( self : List[Any] , A : List[str] ): '''simple docstring''' a : List[str] = outputs.cpu().detach().numpy() a : List[Any] = 0 a : str = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
118
1
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar('_T')


class lowerCamelCase(Generic[_T]):
    '''simple docstring'''

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        '''simple docstring'''
        # _stack1 receives new items; _stack2 serves them in FIFO order.
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        '''simple docstring'''
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        '''simple docstring'''
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        '''simple docstring'''
        self._stack1.append(item)

    def get(self) -> _T:
        '''simple docstring'''
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            # Reverse the input stack into the output stack so the oldest
            # item ends up on top.
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
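A usage sketch of the queue class above (under its name as defined here): items come back in insertion order, and elements only move from the input stack to the output stack when the output stack runs empty, so each element crosses at most twice, giving amortized O(1) per operation.

# Usage sketch for the two-stack queue above.
q = lowerCamelCase([1, 2, 3])
q.put(4)
print(q.get(), q.get())  # 1 2
print(len(q))            # 2
print(q)                 # Queue((3, 4))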
215
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    """simple docstring"""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """simple docstring"""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
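A quick check of the recursion above: the include/exclude branching emits the C(n, r) combinations in lexicographic index order. For example:

# Quick check: C(4, 2) = 6 pairs, printed in lexicographic order.
print_combination([1, 2, 3, 4], 4, 2)
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4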
215
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 ) lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): for example in examples: lowerCAmelCase = video_classifier(UpperCAmelCase_ ) self.assertEqual( UpperCAmelCase_ , [ {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, ] , ) @require_torch def __snake_case ( self ): lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowerCAmelCase = pipeline( '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 ) lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __snake_case ( self ): pass
33
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) 
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) 
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: 
retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) 
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
1
"""
Greedy best-first search on a 2D grid: always expand the open node with the
lowest heuristic (Manhattan distance to the goal) until the goal is reached.
"""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # goal coordinates in (x, y) order, matching Node's signature
                    # and the constructor calls in __init__
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_y, pos_x in path:  # path elements are (row, col)
            grid[pos_y][pos_x] = 2

        for elem in grid:
            print(elem)
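# Editorial note: `search` re-sorts the entire open list on every iteration,
# which costs O(n log n) per step. Since Node defines __lt__ on f_cost, the
# same "pop the most promising node" step could use a binary heap instead.
# A sketch of that alternative (not part of the original implementation):
#
#     import heapq
#
#     heapq.heapify(self.open_nodes)                   # once, after construction
#     current_node = heapq.heappop(self.open_nodes)    # instead of sort() + pop(0)
#     heapq.heappush(self.open_nodes, child_node)      # instead of append()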
42
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
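# Editorial note: `@patch` decorators are applied bottom-up, so the decorator
# nearest the function (`builtins.open` above) supplies the *first* mock
# argument. A minimal, self-contained sketch of that ordering (the function
# name here is illustrative, not part of the test suite):
from unittest.mock import MagicMock


@patch("socket.socket")
@patch("builtins.open")
def _demo_patch_order(open_mock: MagicMock, socket_mock: MagicMock) -> None:
    # The first parameter corresponds to the innermost decorator.
    assert open_mock is not socket_mock


_demo_patch_order()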
442
0
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ): '''simple docstring''' __lowerCamelCase : Optional[int] =prime_factors(_lowerCamelCase ) if is_square_free(_lowerCamelCase ): return -1 if len(_lowerCamelCase ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
719
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
363
0
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCamelCase = logging.get_logger(__name__) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple: _lowercase : Optional[int] = tesseract_config if tesseract_config is not None else '' # apply OCR _lowercase : Any = to_pil_image(SCREAMING_SNAKE_CASE ) _lowercase , _lowercase : Any = pil_image.size _lowercase : Dict = pytesseract.image_to_data(SCREAMING_SNAKE_CASE , lang=SCREAMING_SNAKE_CASE , output_type='dict' , config=SCREAMING_SNAKE_CASE ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[str] = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates _lowercase : Optional[Any] = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if not word.strip()] _lowercase : Union[str, Any] = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] _lowercase : Optional[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] _lowercase : int = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] _lowercase : List[str] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] _lowercase : Optional[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _lowercase : Any = [] for x, y, w, h in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(SCREAMING_SNAKE_CASE ) # finally, normalize the bounding boxes _lowercase : Union[str, Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Union[str, Any] = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = "" , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) _lowercase : Optional[Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} _lowercase : int = get_size_dict(_lowerCAmelCase ) _lowercase : Dict = do_resize _lowercase : Any = size _lowercase : Tuple = resample _lowercase : Optional[int] = apply_ocr _lowercase : Optional[int] = ocr_lang _lowercase : str = 
tesseract_config def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ): _lowercase : Any = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) _lowercase : List[Any] = (size['height'], size['width']) return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ): _lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _lowercase : Tuple = size if size is not None else self.size _lowercase : Optional[int] = get_size_dict(_lowerCAmelCase ) _lowercase : Union[str, Any] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr _lowercase : str = ocr_lang if ocr_lang is not None else self.ocr_lang _lowercase : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config _lowercase : Tuple = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) # All transformations expect numpy arrays. _lowercase : Union[str, Any] = [to_numpy_array(_lowerCAmelCase ) for image in images] if apply_ocr: requires_backends(self , 'pytesseract' ) _lowercase : Any = [] _lowercase : Any = [] for image in images: _lowercase , _lowercase : int = apply_tesseract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) words_batch.append(_lowerCAmelCase ) boxes_batch.append(_lowerCAmelCase ) if do_resize: _lowercase : List[Any] = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) _lowercase : List[str] = [flip_channel_order(_lowerCAmelCase ) for image in images] _lowercase : int = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowercase : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=_lowerCAmelCase ) if apply_ocr: _lowercase : Optional[Any] = words_batch _lowercase : Dict = boxes_batch return data
66
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def _lowerCAmelCase(a : int = 100_0000 , a : int = 10 ) -> int: _SCREAMING_SNAKE_CASE =defaultdict(a ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: _SCREAMING_SNAKE_CASE =max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: _SCREAMING_SNAKE_CASE =1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(a , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f"{solution() = }")
255
0
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset __A ="bert-base-cased" __A ="google/pegasus-xsum" __A =[" Sam ate lunch today.", "Sams lunch ingredients."] __A =["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] __A ="patrickvonplaten/t5-tiny-random" __A ="sshleifer/bart-tiny-random" __A ="sshleifer/tiny-mbart" __A ="sshleifer/tiny-marian-en-de" def a ( _UpperCAmelCase : Path , _UpperCAmelCase : list ): '''simple docstring''' __UpperCAmelCase : List[Any] = '''\n'''.join(_UpperCAmelCase ) Path(_UpperCAmelCase ).open('''w''' ).writelines(_UpperCAmelCase ) def a ( _UpperCAmelCase : Union[str, Any] ): '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(_UpperCAmelCase , f'{split}.source' ) , _UpperCAmelCase ) _dump_articles(os.path.join(_UpperCAmelCase , f'{split}.target' ) , _UpperCAmelCase ) return tmp_dir class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def snake_case__ ( self : List[Any] , a_ : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = AutoTokenizer.from_pretrained(a_ ) __UpperCAmelCase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __UpperCAmelCase : List[Any] = max(len(tokenizer.encode(a_ ) ) for a in ARTICLES ) __UpperCAmelCase : Dict = max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES ) __UpperCAmelCase : List[Any] = 4 __UpperCAmelCase : Optional[int] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. __UpperCAmelCase : Tuple = SeqaSeqDataset( a_ , data_dir=a_ , type_path='''train''' , max_source_length=a_ , max_target_length=a_ , src_lang=a_ , tgt_lang=a_ , ) __UpperCAmelCase : Optional[int] = DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(a_ , a_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __UpperCAmelCase : str = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def snake_case__ ( self : Union[str, Any] , a_ : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : int = AutoTokenizer.from_pretrained(a_ ) __UpperCAmelCase : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __UpperCAmelCase : Optional[Any] = max(len(tokenizer.encode(a_ ) ) for a in ARTICLES ) __UpperCAmelCase : List[Any] = max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES ) __UpperCAmelCase : int = 4 __UpperCAmelCase : List[str] = LegacySeqaSeqDataset( a_ , data_dir=a_ , type_path='''train''' , max_source_length=20 , max_target_length=a_ , ) __UpperCAmelCase : Optional[int] = DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def snake_case__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) __UpperCAmelCase : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) __UpperCAmelCase : Dict = tmp_dir.joinpath('''train.source''' ).open().readlines() __UpperCAmelCase : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(a_ , a_ , 1_28 , a_ ) __UpperCAmelCase : str = {x.name for x in tmp_dir.iterdir()} __UpperCAmelCase : Tuple = {x.name for x in save_dir.iterdir()} __UpperCAmelCase : int = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(a_ ) < len(a_ ) assert len(a_ ) == 1 assert len(packed_examples[0] ) == sum(len(a_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def snake_case__ ( self : Tuple ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = self._get_dataset(max_len=64 ) __UpperCAmelCase : int = 64 __UpperCAmelCase : Dict = ds.make_dynamic_sampler(a_ , required_batch_size_multiple=a_ ) __UpperCAmelCase : Optional[int] = [len(a_ ) for x in batch_sampler] assert len(set(a_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(a_ ) == len(a_ ) # no dropped or added examples __UpperCAmelCase : Union[str, Any] = DataLoader(a_ , batch_sampler=a_ , collate_fn=ds.collate_fn , num_workers=2 ) __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Optional[int] = 
[] for batch in data_loader: __UpperCAmelCase : int = batch['''input_ids'''].shape __UpperCAmelCase : List[str] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple __UpperCAmelCase : List[Any] = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(a_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(a_ ) assert num_src_per_batch[0] == max(a_ ) if failures: raise AssertionError(F'too many tokens in {len(a_ )} batches' ) def snake_case__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = self._get_dataset(max_len=5_12 ) __UpperCAmelCase : Dict = 2 __UpperCAmelCase : str = ds.make_sortish_sampler(a_ , shuffle=a_ ) __UpperCAmelCase : List[Any] = DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 ) __UpperCAmelCase : str = DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a_ ) __UpperCAmelCase : Any = tokenizer.pad_token_id def count_pad_tokens(a_ : List[str] , a_ : str="input_ids" ): return [batch[k].eq(a_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(a_ , k='''labels''' ) ) < sum(count_pad_tokens(a_ , k='''labels''' ) ) assert sum(count_pad_tokens(a_ ) ) < sum(count_pad_tokens(a_ ) ) assert len(a_ ) == len(a_ ) def snake_case__ ( self : Tuple , a_ : Optional[int]=10_00 , a_ : Optional[Any]=1_28 ): '''simple docstring''' if os.getenv('''USE_REAL_DATA''' , a_ ): __UpperCAmelCase : Optional[Any] = '''examples/seq2seq/wmt_en_ro''' __UpperCAmelCase : List[Any] = max_len * 2 * 64 if not Path(a_ ).joinpath('''train.len''' ).exists(): save_len_file(a_ , a_ ) else: __UpperCAmelCase : Any = '''examples/seq2seq/test_data/wmt_en_ro''' __UpperCAmelCase : List[Any] = max_len * 4 save_len_file(a_ , a_ ) __UpperCAmelCase : int = AutoTokenizer.from_pretrained(a_ ) __UpperCAmelCase : Dict = SeqaSeqDataset( a_ , data_dir=a_ , type_path='''train''' , max_source_length=a_ , max_target_length=a_ , n_obs=a_ , ) return ds, max_tokens, tokenizer def snake_case__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_dataset() __UpperCAmelCase : Any = set(DistributedSortishSampler(a_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=a_ ) ) __UpperCAmelCase : Any = set(DistributedSortishSampler(a_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=a_ ) ) assert idsa.intersection(a_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def snake_case__ ( self : int , a_ : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(a_ , use_fast=a_ ) if tok_name == MBART_TINY: __UpperCAmelCase : Any = SeqaSeqDataset( a_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) __UpperCAmelCase : Any = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __UpperCAmelCase : Optional[Any] = SeqaSeqDataset( a_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) __UpperCAmelCase : Any = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(a_ ) == 1 if tok_name == BART_TINY else len(a_ ) == 0
241
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation using the stochastic sampler of
    Karras et al. (2022).
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
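# Editorial note: a minimal usage sketch from a user script. The tiny
# UNet2DModel configuration below is illustrative (untrained weights), not a
# published checkpoint:
#
#     from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#     unet = UNet2DModel(
#         sample_size=32,
#         in_channels=3,
#         out_channels=3,
#         block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]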
241
1
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorPipeline __SCREAMING_SNAKE_CASE : int = ['''prompt'''] __SCREAMING_SNAKE_CASE : Tuple = ['''prompt''', '''negative_prompt'''] __SCREAMING_SNAKE_CASE : Any = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] __SCREAMING_SNAKE_CASE : Tuple = False @property def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return 3_2 @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return 3_2 @property def __lowerCAmelCase ( self : str ): '''simple docstring''' return self.time_input_dim @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 1_0_0 @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __lowerCAmelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) __a : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __a : Union[str, Any] = { 'num_attention_heads': 2, 'attention_head_dim': 1_2, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __a : Optional[Any] = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __a : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) __a : Dict = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) __a : int = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ ) return model @property def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' __a : Optional[Any] = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , ) return image_processor def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a 
: Union[str, Any] = self.dummy_prior __a : int = self.dummy_image_encoder __a : Optional[Any] = self.dummy_text_encoder __a : Tuple = self.dummy_tokenizer __a : Dict = self.dummy_image_processor __a : Tuple = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=10.0 , ) __a : Any = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=0 ): '''simple docstring''' if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): __a : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __a : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __a : Tuple = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : Union[str, Any] = 'cpu' __a : str = self.get_dummy_components() __a : Optional[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __a : List[Any] = output.image_embeds __a : Optional[int] = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __a : List[Any] = image[0, -1_0:] __a : Union[str, Any] = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) __a : Optional[int] = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : str = torch_device == 'cpu' __a : Tuple = True __a : Optional[Any] = False self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , ) @skip_mps def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Optional[int] = torch_device == 'cpu' __a : Any = False self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
47
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase_ : Optional[int] = logging.getLogger(__name__) @dataclass class lowerCamelCase_ : _lowerCAmelCase : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowerCAmelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowerCAmelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowerCAmelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _lowerCAmelCase : bool = field( default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) _lowerCAmelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _lowerCAmelCase : bool = field( default=snake_case_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) @dataclass class lowerCamelCase_ : _lowerCAmelCase : Optional[str] = field(default=snake_case_ , metadata={'help': 'The input training data file (a text file).'} ) _lowerCAmelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) _lowerCAmelCase : bool = field( default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) _lowerCAmelCase : Optional[int] = field( default=snake_case_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) _lowerCAmelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. If passed, sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowerCAmelCase : bool = field( default=snake_case_ , metadata={ 'help': ( 'Whether to pad all samples to the maximum sentence length. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch. More ' 'efficient on GPU but very bad for TPU.' ) } , ) _lowerCAmelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _lowerCAmelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } , ) def __lowercase ( self : Tuple ): """simple docstring""" if self.train_file is not None: SCREAMING_SNAKE_CASE : List[str] = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: SCREAMING_SNAKE_CASE : List[Any] = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCamelCase_ : _lowerCAmelCase : PreTrainedTokenizerBase _lowerCAmelCase : Union[bool, str, PaddingStrategy] = True _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = '''label''' if '''label''' in features[0].keys() else '''labels''' SCREAMING_SNAKE_CASE : Union[str, Any] = [feature.pop(lowerCAmelCase__ ) for feature in features] SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Any = len(features[0]['''input_ids'''] ) SCREAMING_SNAKE_CASE : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] SCREAMING_SNAKE_CASE : Optional[int] = list(chain(*lowerCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Dict = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) # Un-flatten SCREAMING_SNAKE_CASE : int = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_swag''' , A , A ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(A ) datasets.utils.logging.set_verbosity(A ) transformers.utils.logging.set_verbosity(A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = {} if data_args.train_file is not None: SCREAMING_SNAKE_CASE : Dict = data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE : Dict = data_args.validation_file SCREAMING_SNAKE_CASE : List[str] = data_args.train_file.split('''.''' )[-1] SCREAMING_SNAKE_CASE : Optional[int] = load_dataset( A , data_files=A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset( '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE : int = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. SCREAMING_SNAKE_CASE : Optional[Any] = [F"""ending{i}""" for i in range(4 )] SCREAMING_SNAKE_CASE : Any = '''sent1''' SCREAMING_SNAKE_CASE : Tuple = '''sent2''' if data_args.max_seq_length is None: SCREAMING_SNAKE_CASE : List[str] = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value''' ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can''' ''' override this default with `--block_size xxx`.''' ) SCREAMING_SNAKE_CASE : List[str] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) SCREAMING_SNAKE_CASE : Tuple = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(A : Any ): SCREAMING_SNAKE_CASE : int = [[context] * 4 for context in examples[context_name]] SCREAMING_SNAKE_CASE : str = examples[question_header_name] SCREAMING_SNAKE_CASE : List[Any] = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(A ) ] # Flatten out SCREAMING_SNAKE_CASE : List[str] = list(chain(*A ) ) SCREAMING_SNAKE_CASE : int = list(chain(*A ) ) # Tokenize SCREAMING_SNAKE_CASE : Optional[int] = tokenizer( A , A , truncation=A , max_length=A , padding='''max_length''' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(A ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''] if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = min(len(A ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE : Optional[Any] = train_dataset.select(range(A ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): SCREAMING_SNAKE_CASE : str = train_dataset.map( A , batched=A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) SCREAMING_SNAKE_CASE : str = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE : List[Any] = min(len(A ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE : int = eval_dataset.select(range(A ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): SCREAMING_SNAKE_CASE : Tuple = eval_dataset.map( A , batched=A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator SCREAMING_SNAKE_CASE : Optional[int] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=A , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(A : str ): SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = eval_predictions SCREAMING_SNAKE_CASE : Any = np.argmax(A , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer( model=A , args=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=A , data_collator=A , compute_metrics=A , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE : Optional[Any] = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE : List[Any] = last_checkpoint SCREAMING_SNAKE_CASE : List[Any] = trainer.train(resume_from_checkpoint=A ) trainer.save_model() # Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE : List[str] = train_result.metrics SCREAMING_SNAKE_CASE : str = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A ) ) SCREAMING_SNAKE_CASE : Dict = min(A , len(A ) ) trainer.log_metrics('''train''' , A ) trainer.save_metrics('''train''' , A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) SCREAMING_SNAKE_CASE : 
Optional[int] = trainer.evaluate() SCREAMING_SNAKE_CASE : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A ) SCREAMING_SNAKE_CASE : List[str] = min(A , len(A ) ) trainer.log_metrics('''eval''' , A ) trainer.save_metrics('''eval''' , A ) SCREAMING_SNAKE_CASE : Dict = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''multiple-choice''', '''dataset_tags''': '''swag''', '''dataset_args''': '''regular''', '''dataset''': '''SWAG''', '''language''': '''en''', } if training_args.push_to_hub: trainer.push_to_hub(**A ) else: trainer.create_model_card(**A ) def UpperCAmelCase ( A : str ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
527
0
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "AB") to its column number.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
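# Editorial note: a few spot checks for the conversion above:
# "A" -> 1, "AB" -> 1*26 + 2 = 28, "ZZ" -> 26*26 + 26 = 702.
if __name__ == "__main__":
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28
    assert excel_title_to_column("ZZ") == 702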
712
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
69
0
"""
Project Euler Problem 145: https://projecteuler.net/problem=145

A number n is "reversible" if n + reverse(n) consists entirely of odd digits
(leading zeroes are not allowed). Count the reversible numbers below
10**max_power.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
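# Editorial note: Project Euler 145 states there are exactly 120 reversible
# numbers below one thousand, which gives a cheap regression check
# (lengths 1 through 3 cover all numbers below 10**3):
if __name__ == "__main__":
    assert solution(max_power=3) == 120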
400
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training") # TF training parameters SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def lowercase ( SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' return TrainCommand(SCREAMING_SNAKE_CASE ) class a_ ( SCREAMING_SNAKE_CASE__ ): @staticmethod def A_( SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = parser.add_parser('train' , help='CLI tool to train a model on a task.' ) train_parser.add_argument( '--train_data' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , ) train_parser.add_argument( '--column_label' , type=SCREAMING_SNAKE_CASE , default=0 , help='Column of the dataset csv file with example labels.' ) train_parser.add_argument( '--column_text' , type=SCREAMING_SNAKE_CASE , default=1 , help='Column of the dataset csv file with example texts.' ) train_parser.add_argument( '--column_id' , type=SCREAMING_SNAKE_CASE , default=2 , help='Column of the dataset csv file with example ids.' ) train_parser.add_argument( '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' ) train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE , default='' , help='path to validation dataset.' ) train_parser.add_argument( '--validation_split' , type=SCREAMING_SNAKE_CASE , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , ) train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE , default='./' , help='path to saved the trained model.' ) train_parser.add_argument( '--task' , type=SCREAMING_SNAKE_CASE , default='text_classification' , help='Task to train the model on.' ) train_parser.add_argument( '--model' , type=SCREAMING_SNAKE_CASE , default='bert-base-uncased' , help='Model\'s name or path to stored model.' ) train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE , default=32 , help='Batch size for training.' ) train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE , default=64 , help='Batch size for validation.' ) train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE , default=3e-5 , help='Learning rate.' ) train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE , default=1e-08 , help='Epsilon for Adam optimizer.' 
) train_parser.set_defaults(func=SCREAMING_SNAKE_CASE ) def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = logging.get_logger('transformers-cli/training' ) SCREAMING_SNAKE_CASE_ = 'tf' if is_tf_available() else 'torch' os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = args.output SCREAMING_SNAKE_CASE_ = args.column_label SCREAMING_SNAKE_CASE_ = args.column_text SCREAMING_SNAKE_CASE_ = args.column_id self.logger.info(f'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": SCREAMING_SNAKE_CASE_ = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'Loading dataset from {args.train_data}' ) SCREAMING_SNAKE_CASE_ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE_ = None if args.validation_data: self.logger.info(f'Loading validation dataset from {args.validation_data}' ) SCREAMING_SNAKE_CASE_ = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE_ = args.validation_split SCREAMING_SNAKE_CASE_ = args.train_batch_size SCREAMING_SNAKE_CASE_ = args.valid_batch_size SCREAMING_SNAKE_CASE_ = args.learning_rate SCREAMING_SNAKE_CASE_ = args.adam_epsilon def A_( self ) -> Dict: """simple docstring""" if self.framework == "tf": return self.run_tf() return self.run_torch() def A_( self ) -> Optional[int]: """simple docstring""" raise NotImplementedError def A_( self ) -> Dict: """simple docstring""" self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
205
0
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(config, state_dict, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(config, state_dict, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
369
from manim import * class lowercase__ ( __lowerCamelCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase__ : Any = Rectangle(height=0.5, width=0.5 ) UpperCamelCase__ : Any = Rectangle(height=0.25, width=0.25 ) UpperCamelCase__ : Dict = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) UpperCamelCase__ : List[Any] = [mem.copy() for i in range(6 )] UpperCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )] UpperCamelCase__ : str = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : List[str] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Tuple = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : List[str] = Text('''CPU''', font_size=24 ) UpperCamelCase__ : Any = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__magic_name__ ) UpperCamelCase__ : int = [mem.copy() for i in range(4 )] UpperCamelCase__ : str = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Tuple = Text('''GPU''', font_size=24 ) UpperCamelCase__ : str = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) gpu.move_to([-1, -1, 0] ) self.add(__magic_name__ ) UpperCamelCase__ : str = [mem.copy() for i in range(6 )] UpperCamelCase__ : Tuple = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Tuple = Text('''Model''', font_size=24 ) UpperCamelCase__ : Tuple = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) model.move_to([3, -1.0, 0] ) self.add(__magic_name__ ) UpperCamelCase__ : Union[str, Any] = [] UpperCamelCase__ : Any = [] UpperCamelCase__ : List[Any] = [] for i, rect in enumerate(__magic_name__ ): rect.set_stroke(__magic_name__ ) UpperCamelCase__ : Tuple = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__magic_name__, opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__magic_name__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=__magic_name__, buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1], direction=__magic_name__, buff=0.0 ) self.add(__magic_name__ ) model_cpu_arr.append(__magic_name__ ) self.add(*__magic_name__, *__magic_name__, *__magic_name__ ) UpperCamelCase__ : Tuple = [mem.copy() for i in range(6 )] UpperCamelCase__ : Dict = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : List[str] = Text('''Loaded Checkpoint''', font_size=24 ) UpperCamelCase__ : Optional[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(__magic_name__ ) UpperCamelCase__ : Tuple = [] UpperCamelCase__ : List[str] = [] for i, rect in enumerate(__magic_name__ ): UpperCamelCase__ : Optional[int] = fill.copy().set_fill(__magic_name__, opacity=0.7 ) target.move_to(__magic_name__ ) ckpt_arr.append(__magic_name__ ) UpperCamelCase__ : int = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__magic_name__ ) self.add(*__magic_name__, *__magic_name__ ) UpperCamelCase__ : List[str] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase__ : List[str] = 
MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, ) key_text.move_to([-5, 2.4, 0] ) self.add(__magic_name__, __magic_name__ ) UpperCamelCase__ : Any = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, ) blue_text.next_to(__magic_name__, DOWN * 2.4, aligned_edge=key_text.get_left() ) self.add(__magic_name__ ) UpperCamelCase__ : Dict = MarkupText( f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.", font_size=24, ) step_a.move_to([2, 2, 0] ) UpperCamelCase__ : Any = [meta_mem.copy() for i in range(6 )] UpperCamelCase__ : int = [meta_mem.copy() for i in range(6 )] UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Union[str, Any] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Union[str, Any] = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Any = Text('''Disk''', font_size=24 ) UpperCamelCase__ : List[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__magic_name__, run_time=3 ), Write(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ) ) UpperCamelCase__ : Union[str, Any] = [] for i, rect in enumerate(__magic_name__ ): UpperCamelCase__ : Union[str, Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__magic_name__, run_time=1.5 ) ) self.play(*__magic_name__ ) self.play(FadeOut(__magic_name__ ) ) UpperCamelCase__ : Optional[int] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__magic_name__, run_time=3 ) ) self.play( FadeOut(__magic_name__, __magic_name__, *__magic_name__, *__magic_name__ ), ) self.wait()
369
1
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
317
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates the SageMaker HuggingFace estimator for a single-instance training job
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
317
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowercase__ ( unittest.TestCase ): __UpperCamelCase = inspect.getfile(accelerate.test_utils ) __UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] ) __UpperCamelCase = ["""accelerate""", """launch"""] __UpperCamelCase = Path.home() / """.cache/huggingface/accelerate""" __UpperCamelCase = """default_config.yaml""" __UpperCamelCase = config_folder / config_file __UpperCamelCase = config_folder / """_default_config.yaml""" __UpperCamelCase = Path("""tests/test_configs""" ) @classmethod def UpperCAmelCase__ ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def UpperCAmelCase__ ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : int = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def UpperCAmelCase__ ( self ): for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ): with self.subTest(config_file=_lowercase ): execute_subprocess_async( self.base_cmd + ["""--config_file""", str(_lowercase ), self.test_file_path] , env=os.environ.copy() ) def UpperCAmelCase__ ( self ): execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() ) class lowercase__ ( unittest.TestCase ): __UpperCamelCase = """test-tpu""" __UpperCamelCase = """us-central1-a""" __UpperCamelCase = """ls""" __UpperCamelCase = ["""accelerate""", """tpu-config"""] __UpperCamelCase = """cd /usr/share""" __UpperCamelCase = """tests/test_samples/test_command_file.sh""" __UpperCamelCase = """Running gcloud compute tpus tpu-vm ssh""" def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Optional[Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=_lowercase ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Tuple = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + [ """--config_file""", 
"""tests/test_configs/latest.yaml""", """--command""", self.command, """--command""", """echo \"Hello World\"""", """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : str = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Optional[Any] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command_file""", self.command_file, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Tuple = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , ) def UpperCAmelCase__ ( self ): lowerCAmelCase_ : Any = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--accelerate_version""", """12.0.0""", """--debug""", ] , return_stdout=_lowercase , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
440
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
440
1
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all combinations of k numbers out of 1 ... n using backtracking."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
447
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
447
1
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _snake_case : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = BartphoTokenizer UpperCamelCase = False UpperCamelCase = True def lowerCamelCase ( self :Dict ): super().setUp() A = ["▁This", "▁is", "▁a", "▁t", "est"] A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) A = {"unk_token": "<unk>"} A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] ) with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n" ) A = BartphoTokenizer(__UpperCamelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :int ): A = "This is a là test" A = "This is a<unk><unk> test" return input_text, output_text def lowerCamelCase ( self :Tuple ): A = BartphoTokenizer(__UpperCamelCase , self.monolingual_vocab_file , **self.special_tokens_map ) A = "This is a là test" A = "▁This ▁is ▁a ▁l à ▁t est".split() A = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) A = tokens + [tokenizer.unk_token] A = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
524
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging _snake_case : Optional[Any] = logging.get_logger(__name__) _snake_case : Optional[Any] = { 'Visual-Attention-Network/van-base': ( 'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json' ), } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''van''' def __init__( self :Optional[int] , __UpperCamelCase :Tuple=2_24 , __UpperCamelCase :Tuple=3 , __UpperCamelCase :int=[7, 3, 3, 3] , __UpperCamelCase :List[str]=[4, 2, 2, 2] , __UpperCamelCase :str=[64, 1_28, 3_20, 5_12] , __UpperCamelCase :Union[str, Any]=[3, 3, 12, 3] , __UpperCamelCase :Dict=[8, 8, 4, 4] , __UpperCamelCase :List[Any]="gelu" , __UpperCamelCase :str=0.02 , __UpperCamelCase :str=1e-6 , __UpperCamelCase :Tuple=1e-2 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :List[Any]=0.0 , **__UpperCamelCase :List[str] , ): super().__init__(**__UpperCamelCase ) A = image_size A = num_channels A = patch_sizes A = strides A = hidden_sizes A = depths A = mlp_ratios A = hidden_act A = initializer_range A = layer_norm_eps A = layer_scale_init_value A = drop_path_rate A = dropout_rate
524
1
"""simple docstring""" def UpperCAmelCase ( snake_case : int ): if n_term == "": return [] _lowerCAmelCase:Dict = [] for temp in range(int(snake_case ) ): series.append(F'1/{temp + 1}' if series else '''1''' ) return series if __name__ == "__main__": UpperCamelCase__ = input('''Enter the last number (nth term) of the Harmonic Series''') print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''') print(harmonic_series(nth_term))
227
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
207
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _snake_case ( lowerCAmelCase_ ): """simple docstring""" def __A ( self : Optional[Any] ): lowerCAmelCase_ : Dict =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''num_attention_heads''' ) ) class _snake_case : """simple docstring""" def __init__( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]=13 , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=640 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="silu" , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Any=0.0_2 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=10 , UpperCamelCase_ : List[Any]=None , ): lowerCAmelCase_ : List[str] =parent lowerCAmelCase_ : Tuple =batch_size lowerCAmelCase_ : Tuple =image_size lowerCAmelCase_ : Any =patch_size lowerCAmelCase_ : Any =num_channels lowerCAmelCase_ : Dict =last_hidden_size lowerCAmelCase_ : Optional[int] =num_attention_heads lowerCAmelCase_ : str =hidden_act lowerCAmelCase_ : Dict =conv_kernel_size lowerCAmelCase_ : int =output_stride lowerCAmelCase_ : Tuple =hidden_dropout_prob lowerCAmelCase_ : Optional[Any] =attention_probs_dropout_prob lowerCAmelCase_ : List[str] =classifier_dropout_prob lowerCAmelCase_ : int =use_labels lowerCAmelCase_ : Dict =is_training lowerCAmelCase_ : Any =num_labels lowerCAmelCase_ : Optional[Any] =initializer_range lowerCAmelCase_ : List[str] =scope def __A ( self : int ): lowerCAmelCase_ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : Any =None lowerCAmelCase_ : Optional[Any] =None if self.use_labels: lowerCAmelCase_ : Optional[int] =ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ : Optional[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase_ : str =self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self : Optional[Any] ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , 
initializer_range=self.initializer_range , ) def __A ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any ): lowerCAmelCase_ : Optional[Any] =MobileViTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase_ : Optional[Any] =model(UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Any ): lowerCAmelCase_ : List[str] =self.num_labels lowerCAmelCase_ : Tuple =MobileViTForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase_ : str =model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase_ : Optional[int] =self.num_labels lowerCAmelCase_ : Any =MobileViTForSemanticSegmentation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self : Dict ): lowerCAmelCase_ : Any =self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int =config_and_inputs lowerCAmelCase_ : Tuple ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Optional[Any] = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) _UpperCamelCase : List[str] = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : Any = False _UpperCamelCase : Dict = False _UpperCamelCase : Any = False def __A ( self : List[str] ): lowerCAmelCase_ : str =MobileViTModelTester(self ) lowerCAmelCase_ : int =MobileViTConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __A ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def __A ( self : int ): pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def __A ( self : Optional[int] ): pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def __A ( self : List[str] ): pass def __A ( self : Tuple ): lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Optional[Any] =model_class(UpperCamelCase_ ) lowerCAmelCase_ : List[str] =inspect.signature(model.forward ) # signature.parameters is an 
OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : List[str] =[*signature.parameters.keys()] lowerCAmelCase_ : Tuple =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __A ( self : Tuple ): pass def __A ( self : Optional[int] ): lowerCAmelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __A ( self : Dict ): def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ): lowerCAmelCase_ : Union[str, Any] =model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase_ : List[str] =model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase_ : Dict =outputs.hidden_states lowerCAmelCase_ : Union[str, Any] =5 self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCAmelCase_ : Union[str, Any] =2 for i in range(len(UpperCamelCase_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Any =True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ : List[str] =True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __A ( self : int ): lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def __A ( self : Optional[Any] ): lowerCAmelCase_ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) @slow def __A ( self : List[Any] ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Any =MobileViTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( ): lowerCAmelCase_ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Dict ): return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def __A ( self : Union[str, Any] ): lowerCAmelCase_ : Tuple =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(UpperCamelCase_ ) lowerCAmelCase_ : Optional[Any] =self.default_image_processor lowerCAmelCase_ : Optional[int] =prepare_img() lowerCAmelCase_ : Union[str, Any] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase_ : int =model(**UpperCamelCase_ ) # verify the logits lowerCAmelCase_ : Optional[int] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCAmelCase_ : List[Any] 
=torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow def __A ( self : Union[str, Any] ): lowerCAmelCase_ : List[str] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCAmelCase_ : Tuple =model.to(UpperCamelCase_ ) lowerCAmelCase_ : Any =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCAmelCase_ : int =prepare_img() lowerCAmelCase_ : Optional[int] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase_ : Optional[Any] =model(**UpperCamelCase_ ) lowerCAmelCase_ : List[str] =outputs.logits # verify the logits lowerCAmelCase_ : Dict =torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase_ ) lowerCAmelCase_ : List[str] =torch.tensor( [ [[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]], [[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]], [[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow def __A ( self : Tuple ): lowerCAmelCase_ : Optional[int] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCAmelCase_ : str =model.to(UpperCamelCase_ ) lowerCAmelCase_ : int =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCAmelCase_ : Union[str, Any] =prepare_img() lowerCAmelCase_ : str =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase_ : Optional[int] =model(**UpperCamelCase_ ) lowerCAmelCase_ : str =outputs.logits.detach().cpu() lowerCAmelCase_ : Any =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)] ) lowerCAmelCase_ : Optional[int] =torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ ) lowerCAmelCase_ : Tuple =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ ) lowerCAmelCase_ : List[str] =torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
305
0
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X"  # First-level markdown heading
  allow_empty: false
  allow_empty_text: true
  subsections:
  - name: "Table of Contents"
    allow_empty: false
    allow_empty_text: false
    subsections: null
  - name: "Dataset Description"
    allow_empty: false
    allow_empty_text: false
    subsections:
    - name: "Dataset Summary"
      allow_empty: false
      allow_empty_text: false
      subsections: null
    - name: "Supported Tasks and Leaderboards"
      allow_empty: true
      allow_empty_text: true
      subsections: null
    - name: Languages
      allow_empty: false
      allow_empty_text: true
      subsections: null
"""
)

CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {"name": "Dataset Summary", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                        {"name": "Supported Tasks and Leaderboards", "text": "", "is_empty_text": True, "subsections": []},
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_CORRECT = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

README_CORRECT_FOUR_LEVEL = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {"name": "Extra Ignored Subsection", "text": "", "is_empty_text": True, "subsections": []}
                            ],
                        },
                        {"name": "Supported Tasks and Leaderboards", "text": "", "is_empty_text": True, "subsections": []},
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = "---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = "# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = "---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = "---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = "---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
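# Illustrative sketch (not part of the original test suite): driving the validator
# directly, assuming the `example_yaml_structure` defined above.
#
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # passes silently; a malformed card raises ValueError
#                        # listing every issue found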
323
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
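# Illustrative example (hypothetical file names): on a branch where src/foo.py and
# README.md differ from the fork point, `python ./utils/get_modified_files.py src`
# prints "src/foo.py" with no trailing newline, ready to interpolate into a
# Makefile target such as `black $(shell python utils/get_modified_files.py src tests)`.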
323
1
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
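# Illustrative usage (not from the original module; the graph and weights below
# are made up for the example):
#
#     g = Graph(3)
#     g.add_edge(0, 1, 5)
#     g.add_edge(1, 2, 2)
#     g.add_edge(0, 2, 3)
#     g.boruvka()  # keeps edges (1, 2) and (0, 2); total MST weight is 5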
43
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
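# Illustrative note (not part of the original file): with this lazy-module pattern,
# `from transformers import MobileViTConfig` resolves cheaply, while the heavy
# torch/TF modules are only imported on first attribute access, e.g.:
#
#     from transformers import MobileViTConfig  # config only, no torch import
#     config = MobileViTConfig()                # defaults; kwargs are optional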
43
1
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ): @register_to_config def __init__( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : bool = False , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[int] = nn.Embedding(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[Any] = nn.Embedding(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[int] = nn.Dropout(p=snake_case__ ) UpperCAmelCase__ : Optional[int] = TaConfig( vocab_size=snake_case__ , d_model=snake_case__ , num_heads=snake_case__ , d_kv=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , feed_forward_proj=snake_case__ , is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , ) UpperCAmelCase__ : Tuple = nn.ModuleList() for lyr_num in range(snake_case__ ): UpperCAmelCase__ : Tuple = TaBlock(snake_case__ ) self.encoders.append(snake_case__ ) UpperCAmelCase__ : str = TaLayerNorm(snake_case__ ) UpperCAmelCase__ : Tuple = nn.Dropout(p=snake_case__ ) def __a ( self : int , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.token_embedder(snake_case__ ) UpperCAmelCase__ : Optional[int] = encoder_input_tokens.shape[1] UpperCAmelCase__ : List[Any] = torch.arange(snake_case__ , device=encoder_input_tokens.device ) x += self.position_encoding(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = self.dropout_pre(snake_case__ ) # inverted the attention mask UpperCAmelCase__ : List[Any] = encoder_input_tokens.size() UpperCAmelCase__ : Tuple = self.get_extended_attention_mask(snake_case__ , snake_case__ ) for lyr in self.encoders: UpperCAmelCase__ : Any = lyr(snake_case__ , snake_case__ )[0] UpperCAmelCase__ : Any = self.layer_norm(snake_case__ ) return self.dropout_post(snake_case__ ), encoder_inputs_mask
438
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> str: '''simple docstring''' if not isinstance(snake_case , snake_case ): raise ValueError("iterations must be defined as integers" ) if not isinstance(snake_case , snake_case ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) UpperCAmelCase__ : str = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
438
1
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
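# To run only this suite (illustrative command; the path assumes a diffusers
# development checkout):
#
#     pytest tests/schedulers/test_scheduler_consistency_model.py -q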
381
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
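# Illustrative usage of the lazily exported symbols (the checkpoint id below is
# the public CIDAS release; treat it as an example, not a requirement):
#
#     from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")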
381
1