Dataset schema (one row per `code`/`style_context` pair):

| column | dtype | min | max |
| --- | --- | --- | --- |
| code | string (length) | 82 | 53.2k |
| code_codestyle | int64 | 0 | 721 |
| style_context | string (length) | 91 | 41.9k |
| style_context_codestyle | int64 | 0 | 699 |
| label | int64 | 0 | 1 |
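A minimal sketch of loading and inspecting such a dataset with the `datasets` library. The repository id `user/code-style-pairs` is a placeholder, and the reading of `label` (1 when the two codestyle ids match) is inferred from the rows below, not stated by the source:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical repo id -- substitute the real one.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # `code` cells range from 82 to 53.2k characters

# In the rows shown below, label == 1 exactly when code_codestyle equals
# style_context_codestyle, suggesting a same-style classification task.
```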
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class a ( __lowerCAmelCase , unittest.TestCase ): _snake_case : List[str] = TextToVideoSDPipeline _snake_case : List[str] = TEXT_TO_IMAGE_PARAMS _snake_case : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. _snake_case : int = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def lowerCAmelCase_ ( self : int ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) _UpperCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) _UpperCAmelCase = CLIPTextModel(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _UpperCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = TextToVideoSDPipeline(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = """np""" _UpperCAmelCase = sd_pipe(**_SCREAMING_SNAKE_CASE 
).frames _UpperCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _UpperCAmelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase_ ( self : List[Any] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCAmelCase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCAmelCase_ ( self : Dict ): pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def lowerCAmelCase_ ( self : int ): pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def lowerCAmelCase_ ( self : int ): pass def lowerCAmelCase_ ( self : Optional[Any] ): return super().test_progress_bar() @slow @skip_mps class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) _UpperCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _UpperCAmelCase = pipe.to("""cuda""" ) _UpperCAmelCase = """Spiderman is surfing""" _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="""pt""" ).frames _UpperCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) _UpperCAmelCase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) _UpperCAmelCase = pipe.to("""cuda""" ) _UpperCAmelCase = """Spiderman is surfing""" _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""pt""" ).frames _UpperCAmelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
code_codestyle: 277
'''simple docstring''' from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image UpperCamelCase_ : int = ['''text''', '''image''', '''audio'''] def __a ( _UpperCamelCase: List[str] ) -> Dict: """simple docstring""" _snake_case = [] for input_type in input_types: if input_type == "text": inputs.append("Text input" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3_000 ) ) elif isinstance(_UpperCamelCase , _UpperCamelCase ): inputs.append(create_inputs(_UpperCamelCase ) ) else: raise ValueError(F"""Invalid type requested: {input_type}""" ) return inputs def __a ( _UpperCamelCase: List ) -> Dict: """simple docstring""" _snake_case = [] for output in outputs: if isinstance(_UpperCamelCase , (str, AgentText) ): output_types.append("text" ) elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ): output_types.append("image" ) elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ): output_types.append("audio" ) else: raise ValueError(F"""Invalid output: {output}""" ) return output_types @is_tool_test class _a : def _lowercase ( self ) -> Any: self.assertTrue(hasattr(self.tool ,"inputs" ) ) self.assertTrue(hasattr(self.tool ,"outputs" ) ) _snake_case = self.tool.inputs for _input in inputs: if isinstance(_input ,_SCREAMING_SNAKE_CASE ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) _snake_case = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def _lowercase ( self ) -> Any: _snake_case = create_inputs(self.tool.inputs ) _snake_case = self.tool(*_SCREAMING_SNAKE_CASE ) # There is a single output if len(self.tool.outputs ) == 1: _snake_case = [outputs] self.assertListEqual(output_types(_SCREAMING_SNAKE_CASE ) ,self.tool.outputs ) def _lowercase ( self ) -> str: self.assertTrue(hasattr(self.tool ,"description" ) ) self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) ) self.assertTrue(self.tool.description.startswith("This is a tool that" ) ) def _lowercase ( self ) -> Tuple: _snake_case = create_inputs(self.tool.inputs ) _snake_case = self.tool(*_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): _snake_case = [outputs] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(self.tool.outputs ) ) for output, output_type in zip(_SCREAMING_SNAKE_CASE ,self.tool.outputs ): _snake_case = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ) -> Optional[Any]: _snake_case = create_inputs(self.tool.inputs ) _snake_case = [] for _input, input_type in zip(_SCREAMING_SNAKE_CASE ,self.tool.inputs ): if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error _snake_case = self.tool(*_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): _snake_case = [outputs] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) 
,len(self.tool.outputs ) )
style_context_codestyle: 185
label: 0
```python
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
```
code_codestyle: 657
```python
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```
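The `_LazyModule` indirection above keeps `import transformers` cheap: framework-specific submodules are only imported when one of their exported names is first touched. A minimal sketch of the underlying idea (an illustration, not the actual `transformers.utils._LazyModule` code):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an exported name is first accessed."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ runs only once per name
        return value
```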
style_context_codestyle: 657
label: 1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class UpperCamelCase : lowercase = 42 lowercase = 42 class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : list[list[Edge]] = [[] for _ in range(__UpperCamelCase )] lowercase_ : Optional[Any] = size def __getitem__( self ,__UpperCamelCase ) -> Iterator[Edge]: '''simple docstring''' return iter(self._graph[vertex] ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self._size def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(__UpperCamelCase ,__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int | None: '''simple docstring''' lowercase_ : Union[str, Any] = deque([start_vertex] ) lowercase_ : list[int | None] = [None] * self.size lowercase_ : Union[str, Any] = 0 while queue: lowercase_ : int = queue.popleft() lowercase_ : Optional[Any] = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowercase_ : List[Any] = current_distance + edge.weight lowercase_ : int = distances[edge.destination_vertex] if ( isinstance(__UpperCamelCase ,__UpperCamelCase ) and new_distance >= dest_vertex_distance ): continue lowercase_ : Any = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 425
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ): # Initialise PyTorch model lowercase_ : Any = FunnelConfig.from_json_file(__SCREAMING_SNAKE_CASE ) print(F'''Building PyTorch model from configuration: {config}''' ) lowercase_ : int = FunnelBaseModel(__SCREAMING_SNAKE_CASE ) if base_model else FunnelModel(__SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_funnel(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
style_context_codestyle: 425
label: 1
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=False , ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[Any] = size if size is not None else {'''height''': 20, '''width''': 20} snake_case__ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case__ : str = parent snake_case__ : Optional[Any] = batch_size snake_case__ : str = num_channels snake_case__ : Union[str, Any] = image_size snake_case__ : Tuple = min_resolution snake_case__ : str = max_resolution snake_case__ : Optional[Any] = do_resize snake_case__ : List[str] = size snake_case__ : Optional[Any] = do_center_crop snake_case__ : Tuple = crop_size snake_case__ : Any = do_normalize snake_case__ : int = image_mean snake_case__ : Union[str, Any] = image_std snake_case__ : List[Any] = do_reduce_labels def lowercase__ ( self ) -> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _A ( ): snake_case__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case__ : List[Any] = Image.open(dataset[0]['''file'''] ) snake_case__ : Optional[int] = Image.open(dataset[1]['''file'''] ) return image, map def _A ( ): snake_case__ : List[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case__ : Union[str, Any] = Image.open(ds[0]['''file'''] ) snake_case__ : Union[str, Any] = Image.open(ds[1]['''file'''] ) snake_case__ : Dict = Image.open(ds[2]['''file'''] ) snake_case__ : Optional[int] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase = BeitImageProcessor if is_vision_available() else None def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Optional[int] = BeitImageProcessingTester(self ) @property def lowercase__ ( self ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ) -> Any: """simple docstring""" snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCamelCase , '''size''' ) ) self.assertTrue(hasattr(lowerCamelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCamelCase , '''center_crop''' ) ) self.assertTrue(hasattr(lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCamelCase , '''image_mean''' ) ) 
self.assertTrue(hasattr(lowerCamelCase , '''image_std''' ) ) def lowercase__ ( self ) -> Union[str, Any]: """simple docstring""" snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCamelCase ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase ) def lowercase__ ( self ) -> List[str]: """simple docstring""" pass def lowercase__ ( self ) -> Tuple: """simple docstring""" snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case__ : List[str] = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case__ : Dict = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self ) -> int: """simple docstring""" snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case__ : Optional[int] = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase__ ( self ) -> List[str]: """simple docstring""" snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) snake_case__ : List[str] = [] for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched snake_case__ : Tuple = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) snake_case__ ,snake_case__ : List[Any] = prepare_semantic_single_inputs() snake_case__ : Any = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) snake_case__ ,snake_case__ : Union[str, Any] = prepare_semantic_batch_inputs() snake_case__ : Dict = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 snake_case__ ,snake_case__ : Optional[int] = prepare_semantic_single_inputs() snake_case__ : Optional[int] = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) snake_case__ : List[str] = True snake_case__ : Tuple = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
code_codestyle: 694
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple: """simple docstring""" snake_case__ : Optional[Any] = 1.0 if scale is None else scale snake_case__ : Dict = 0.0 if loc is None else loc super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] ) @property def lowercase__ ( self ) -> Dict: """simple docstring""" return self.base_dist.mean * self.scale + self.loc @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return self.base_dist.variance * self.scale**2 @property def lowercase__ ( self ) -> List[str]: """simple docstring""" return self.variance.sqrt() class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None: """simple docstring""" super().__init__(**lowerCamelCase ) snake_case__ : Tuple = args_dim snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] ) snake_case__ : Optional[int] = domain_map def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]: """simple docstring""" snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj] return self.domain_map(*lowerCamelCase ) class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" super().__init__() snake_case__ : Tuple = function def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]: """simple docstring""" return self.function(lowerCamelCase , *lowerCamelCase ) class snake_case : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__( self , lowerCamelCase = 1 ) -> None: """simple docstring""" snake_case__ : Optional[Any] = dim snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def lowercase__ ( self , lowerCamelCase ) -> int: """simple docstring""" if self.dim == 1: return self.distribution_class(*lowerCamelCase ) else: return Independent(self.distribution_class(*lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution: """simple docstring""" snake_case__ : List[Any] = self._base_distribution(lowerCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim ) @property def lowercase__ ( self ) -> Tuple: """simple docstring""" return () if self.dim == 1 else (self.dim,) @property def lowercase__ ( self ) -> int: """simple docstring""" return len(self.event_shape ) @property def lowercase__ ( self ) -> float: """simple docstring""" return 0.0 def lowercase__ ( self , lowerCamelCase ) -> nn.Module: """simple docstring""" return ParameterProjection( in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowercase__ ( self , *lowerCamelCase ) -> Any: """simple docstring""" raise NotImplementedError() @staticmethod def lowercase__ ( lowerCamelCase ) -> torch.Tensor: """simple docstring""" return (x + 
torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0 class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int: """simple docstring""" snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class snake_case ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = cls.squareplus(lowerCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowercase__ ( self , lowerCamelCase ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : str = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) else: return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution: """simple docstring""" snake_case__ ,snake_case__ : Optional[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
style_context_codestyle: 694
label: 1
```python
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
```
code_codestyle: 10
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a ( _lowerCAmelCase , unittest.TestCase ): UpperCamelCase_ : Optional[int] = LongformerTokenizer UpperCamelCase_ : str = True UpperCamelCase_ : Tuple = LongformerTokenizerFast UpperCamelCase_ : List[str] = True def _SCREAMING_SNAKE_CASE ( self : Dict )-> Any: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCamelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCamelCase = {"unk_token": "<unk>"} UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **UpperCAmelCase_ : Dict )-> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , **UpperCAmelCase_ : Optional[Any] )-> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Dict )-> List[Any]: """simple docstring""" UpperCamelCase = "lower newer" UpperCamelCase = "lower newer" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Optional[Any]: """simple docstring""" UpperCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase = "lower newer" UpperCamelCase = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] UpperCamelCase = tokenizer.tokenize(UpperCAmelCase_ ) # , add_prefix_space=True) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = tokens + [tokenizer.unk_token] UpperCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Tuple: """simple docstring""" UpperCamelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCAmelCase_ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=UpperCAmelCase_ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _SCREAMING_SNAKE_CASE ( self : int )-> int: """simple docstring""" UpperCamelCase = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) UpperCamelCase = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase_ ) UpperCamelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase_ ) UpperCamelCase = tokenizer.encode( "sequence builders" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ ) UpperCamelCase = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]: """simple docstring""" UpperCamelCase = self.get_tokenizer() UpperCamelCase = "Encode this sequence." UpperCamelCase = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments UpperCamelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ ) UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ ) UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) UpperCamelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Testing spaces after special tokens UpperCamelCase = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )} ) # mask token has a left space UpperCamelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) UpperCamelCase = "Encode <mask> sequence" UpperCamelCase = "Encode <mask>sequence" UpperCamelCase = tokenizer.encode(UpperCAmelCase_ ) UpperCamelCase = encoded.index(UpperCAmelCase_ ) UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCamelCase = tokenizer.encode(UpperCAmelCase_ ) UpperCamelCase = encoded.index(UpperCAmelCase_ ) UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> List[str]: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Tuple: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) UpperCamelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) UpperCamelCase = "A, <mask> AllenNLP sentence." 
UpperCamelCase = tokenizer_r.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ ) UpperCamelCase = tokenizer_p.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _SCREAMING_SNAKE_CASE ( self : Any )-> Dict: """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCAmelCase_ ) self.assertEqual(post_processor_state["add_prefix_space"] , UpperCAmelCase_ ) self.assertEqual(post_processor_state["trim_offsets"] , UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Union[str, Any]: """simple docstring""" # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase = "hello" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase = f"{text_of_1_token} {text_of_1_token}" UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase_ ) + 1, len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase_ ) + 1, len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = 
self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_ ) + 1, 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_ ), 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , ) UpperCamelCase = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ ) UpperCamelCase = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_ ), 1 + len(UpperCAmelCase_ ) + 1 + len(UpperCAmelCase_ )) , )
style_context_codestyle: 554
label: 0
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _enforce_args(UpperCamelCase__ , UpperCamelCase__ ) if n == 0: return 0 A__ = float('-inf' ) for i in range(1 , n + 1 ): A__ = max( UpperCamelCase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCamelCase__ ) ) return max_revue def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _enforce_args(UpperCamelCase__ , UpperCamelCase__ ) A__ = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: A__ = float('-inf' ) for i in range(1 , n + 1 ): A__ = max( UpperCamelCase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCamelCase__ , UpperCamelCase__ ) , ) A__ = max_revenue return max_rev[n] def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _enforce_args(UpperCamelCase__ , UpperCamelCase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. A__ = [float('-inf' ) for _ in range(n + 1 )] A__ = 0 for i in range(1 , n + 1 ): A__ = max_rev[i] for j in range(1 , i + 1 ): A__ = max(UpperCamelCase__ , prices[j - 1] + max_rev[i - j] ) A__ = max_revenue_i return max_rev[n] def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if n < 0: A__ = F'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(UpperCamelCase__ ) if n > len(UpperCamelCase__ ): A__ = ( 'Each integral piece of rod must have a corresponding price. ' F'''Got n = {n} but length of prices = {len(UpperCamelCase__ )}''' ) raise ValueError(UpperCamelCase__ ) def UpperCAmelCase ( ): """simple docstring""" A__ = [6, 10, 12, 15, 20, 23] A__ = len(UpperCamelCase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. A__ = 36 A__ = top_down_cut_rod(UpperCamelCase__ , UpperCamelCase__ ) A__ = bottom_up_cut_rod(UpperCamelCase__ , UpperCamelCase__ ) A__ = naive_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
code_codestyle: 536
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ) -> str: A__ = get_activation('swish' ) self.assertIsInstance(__UpperCAmelCase ,nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def snake_case__ ( self ) -> Optional[Any]: A__ = get_activation('silu' ) self.assertIsInstance(__UpperCAmelCase ,nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def snake_case__ ( self ) -> List[str]: A__ = get_activation('mish' ) self.assertIsInstance(__UpperCAmelCase ,nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 ) def snake_case__ ( self ) -> List[str]: A__ = get_activation('gelu' ) self.assertIsInstance(__UpperCAmelCase ,nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 ,dtype=torch.floataa ) ).item() ,0 ) self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 ) self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
style_context_codestyle: 536
label: 1
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
code_codestyle: 49
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
style_context_codestyle: 49
label: 1
```python
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
```
code_codestyle: 456
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class __a:
    def __init__(
        self: Optional[int], snake_case_: int, snake_case_: Optional[Any]=13, snake_case_: List[str]=7,
        snake_case_: Tuple=True, snake_case_: Optional[Any]=True, snake_case_: Optional[Any]=True,
        snake_case_: str=True, snake_case_: Optional[Any]=True, snake_case_: List[Any]=False,
        snake_case_: str=False, snake_case_: List[str]=False, snake_case_: List[Any]=2,
        snake_case_: List[Any]=99, snake_case_: Tuple=0, snake_case_: Optional[int]=32,
        snake_case_: Union[str, Any]=5, snake_case_: Any=4, snake_case_: str=0.1,
        snake_case_: List[Any]=0.1, snake_case_: Optional[Any]=5_12, snake_case_: str=2,
        snake_case_: List[str]=0.0_2, snake_case_: Optional[int]=2, snake_case_: Union[str, Any]=4,
        snake_case_: str="last", snake_case_: Union[str, Any]=True, snake_case_: int=None,
        snake_case_: Any=0,
    ) -> Tuple:
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_lengths
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = gelu_activation
        __lowerCAmelCase = sinusoidal_embeddings
        __lowerCAmelCase = causal
        __lowerCAmelCase = asm
        __lowerCAmelCase = n_langs
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = n_special
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = summary_type
        __lowerCAmelCase = use_proj
        __lowerCAmelCase = scope
        __lowerCAmelCase = bos_token_id

    def UpperCamelCase(self: Tuple) -> int:
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCAmelCase = None
        if self.use_input_lengths:
            __lowerCAmelCase = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __lowerCAmelCase = ids_tensor([self.batch_size], 2).float()
            __lowerCAmelCase = ids_tensor([self.batch_size], self.num_choices)
        __lowerCAmelCase = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def UpperCamelCase(self: Tuple) -> Union[str, Any]:
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def UpperCamelCase(self: Any, snake_case_: List[str], snake_case_: Union[str, Any], snake_case_: Dict, snake_case_: List[Any], snake_case_: Dict, snake_case_: Union[str, Any], snake_case_: Any, snake_case_: Optional[int], snake_case_: List[Any]) -> Tuple:
        __lowerCAmelCase = XLMModel(config=snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_, lengths=snake_case_, langs=snake_case_)
        __lowerCAmelCase = model(snake_case_, langs=snake_case_)
        __lowerCAmelCase = model(snake_case_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCamelCase(self: str, snake_case_: str, snake_case_: List[Any], snake_case_: str, snake_case_: Dict, snake_case_: Optional[Any], snake_case_: Dict, snake_case_: Optional[int], snake_case_: int, snake_case_: int) -> Dict:
        __lowerCAmelCase = XLMWithLMHeadModel(snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_, token_type_ids=snake_case_, labels=snake_case_)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def UpperCamelCase(self: Any, snake_case_: Union[str, Any], snake_case_: str, snake_case_: Any, snake_case_: str, snake_case_: Union[str, Any], snake_case_: Dict, snake_case_: int, snake_case_: List[Any], snake_case_: Tuple) -> Dict:
        __lowerCAmelCase = XLMForQuestionAnsweringSimple(snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_)
        __lowerCAmelCase = model(snake_case_, start_positions=snake_case_, end_positions=snake_case_)
        __lowerCAmelCase = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def UpperCamelCase(self: Tuple, snake_case_: List[Any], snake_case_: Optional[Any], snake_case_: int, snake_case_: int, snake_case_: str, snake_case_: List[Any], snake_case_: Optional[Any], snake_case_: List[Any], snake_case_: Tuple) -> Optional[int]:
        __lowerCAmelCase = XLMForQuestionAnswering(snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_)
        __lowerCAmelCase = model(
            snake_case_,
            start_positions=snake_case_,
            end_positions=snake_case_,
            cls_index=snake_case_,
            is_impossible=snake_case_,
            p_mask=snake_case_,
        )
        __lowerCAmelCase = model(
            snake_case_,
            start_positions=snake_case_,
            end_positions=snake_case_,
            cls_index=snake_case_,
            is_impossible=snake_case_,
        )
        ((__lowerCAmelCase),) = result_with_labels.to_tuple()
        __lowerCAmelCase = model(snake_case_, start_positions=snake_case_, end_positions=snake_case_)
        ((__lowerCAmelCase),) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def UpperCamelCase(self: Tuple, snake_case_: Optional[Any], snake_case_: List[Any], snake_case_: Optional[int], snake_case_: str, snake_case_: int, snake_case_: Optional[int], snake_case_: Union[str, Any], snake_case_: str, snake_case_: Optional[Any]) -> int:
        __lowerCAmelCase = XLMForSequenceClassification(snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_)
        __lowerCAmelCase = model(snake_case_, labels=snake_case_)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def UpperCamelCase(self: Optional[Any], snake_case_: Optional[int], snake_case_: str, snake_case_: str, snake_case_: List[str], snake_case_: int, snake_case_: Union[str, Any], snake_case_: str, snake_case_: List[str], snake_case_: Union[str, Any]) -> Optional[Any]:
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = XLMForTokenClassification(snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = model(snake_case_, attention_mask=snake_case_, labels=snake_case_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def UpperCamelCase(self: Union[str, Any], snake_case_: Tuple, snake_case_: Tuple, snake_case_: Optional[int], snake_case_: Optional[Any], snake_case_: Union[str, Any], snake_case_: Optional[int], snake_case_: Tuple, snake_case_: List[str], snake_case_: Tuple) -> List[Any]:
        __lowerCAmelCase = self.num_choices
        __lowerCAmelCase = XLMForMultipleChoice(config=snake_case_)
        model.to(snake_case_)
        model.eval()
        __lowerCAmelCase = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase = model(
            snake_case_,
            attention_mask=snake_case_,
            token_type_ids=snake_case_,
            labels=snake_case_,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def UpperCamelCase(self: Optional[Any]) -> List[Any]:
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
            (__lowerCAmelCase),
        ) = config_and_inputs
        __lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
        return config, inputs_dict


@require_torch
class __a(SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, unittest.TestCase):
    SCREAMING_SNAKE_CASE = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    SCREAMING_SNAKE_CASE = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def UpperCamelCase(self: List[str], snake_case_: str, snake_case_: str, snake_case_: str, snake_case_: int, snake_case_: str) -> Tuple:
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def UpperCamelCase(self: int, snake_case_: Tuple, snake_case_: Any, snake_case_: str=False) -> str:
        __lowerCAmelCase = super()._prepare_for_class(snake_case_, snake_case_, return_labels=snake_case_)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=snake_case_
                )
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=snake_case_
                )
        return inputs_dict

    def UpperCamelCase(self: int) -> List[Any]:
        __lowerCAmelCase = XLMModelTester(self)
        __lowerCAmelCase = ConfigTester(self, config_class=snake_case_, emb_dim=37)

    def UpperCamelCase(self: List[str]) -> Any:
        self.config_tester.run_common_tests()

    def UpperCamelCase(self: Optional[int]) -> Union[str, Any]:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*snake_case_)

    def UpperCamelCase(self: Any) -> int:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*snake_case_)

    def UpperCamelCase(self: Dict) -> str:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*snake_case_)

    def UpperCamelCase(self: List[Any]) -> Union[str, Any]:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*snake_case_)

    def UpperCamelCase(self: List[str]) -> Dict:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_)

    def UpperCamelCase(self: List[Any]) -> int:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*snake_case_)

    def UpperCamelCase(self: str) -> str:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_)

    def UpperCamelCase(self: Union[str, Any], snake_case_: Tuple, snake_case_: Optional[Any], snake_case_: List[Any], snake_case_: Any, snake_case_: int, snake_case_: Optional[int]=False, snake_case_: List[Any]=1) -> Optional[Any]:
        self.assertIsInstance(snake_case_, snake_case_)
        self.assertListEqual(
            [isinstance(snake_case_, snake_case_) for iter_attentions in attentions], [True] * len(snake_case_)
        )
        self.assertEqual(len(snake_case_), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(snake_case_):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(snake_case_)
            )

    def UpperCamelCase(self: List[Any], snake_case_: Any, snake_case_: str, snake_case_: Any, snake_case_: List[Any], snake_case_: Dict, snake_case_: List[str]=False, snake_case_: Any=1) -> Any:
        self.assertIsInstance(snake_case_, snake_case_)
        self.assertListEqual(
            [isinstance(snake_case_, snake_case_) for iter_hidden_states in hidden_states],
            [True] * len(snake_case_),
        )
        self.assertEqual(len(snake_case_), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(snake_case_):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(snake_case_),
            )
        pass

    @slow
    def UpperCamelCase(self: Any) -> List[str]:
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = XLMModel.from_pretrained(snake_case_)
            self.assertIsNotNone(snake_case_)


@require_torch
class __a(unittest.TestCase):
    @slow
    def UpperCamelCase(self: int) -> List[str]:
        __lowerCAmelCase = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""")
        model.to(snake_case_)
        __lowerCAmelCase = torch.tensor([[14, 4_47]], dtype=torch.long, device=snake_case_)  # the president
        __lowerCAmelCase = [
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        __lowerCAmelCase = model.generate(snake_case_, do_sample=snake_case_)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), snake_case_)
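A minimal sketch of how the tester above is normally driven; `XLMModelTester` is the name the setUp method in this file itself references, so it is assumed here to be bound to the first class:

# Hedged usage sketch (assumes the first class above is named XLMModelTester).
import unittest

class XLMSmokeTest(unittest.TestCase):
    def test_model_output_shape(self):
        tester = XLMModelTester(self)
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlm_model(*config_and_inputs)

if __name__ == "__main__":
    unittest.main()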
style_context_codestyle: 456
label: 1
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Dict, UpperCAmelCase_ : str ) -> List[str]: """simple docstring""" A__ = os.path.abspath(UpperCAmelCase_ ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model A__ = tf.train.list_variables(UpperCAmelCase_ ) A__ = [] A__ = [] A__ = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A__ = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' A__ = name[1:] # figure out how many levels deep the name is A__ = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(UpperCAmelCase_ ) # read data A__ = tf.train.load_variable(UpperCAmelCase_, UpperCAmelCase_ ) names.append("/".join(UpperCAmelCase_ ) ) arrays.append(UpperCAmelCase_ ) logger.info(F"""Read a total of {len(UpperCAmelCase_ ):,} layers""" ) # Sanity check if len(set(UpperCAmelCase_ ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(UpperCAmelCase_ ) )})""" ) A__ = list(set(UpperCAmelCase_ ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(UpperCAmelCase_, UpperCAmelCase_ ): A__ = full_name.split("/" ) A__ = model A__ = [] for i, m_name in enumerate(UpperCAmelCase_ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): A__ = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "embeddings" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) A__ = getattr(UpperCAmelCase_, "encoder" ) A__ = getattr(UpperCAmelCase_, "layer" ) A__ = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) A__ = getattr(UpperCAmelCase_, "pooler" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) A__ = getattr(UpperCAmelCase_, "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) A__ = getattr(UpperCAmelCase_, "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) A__ = getattr(UpperCAmelCase_, "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) A__ = getattr(UpperCAmelCase_, "token_type_embeddings" ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append("weight" ) A__ = getattr(UpperCAmelCase_, "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) A__ = getattr(UpperCAmelCase_, "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) A__ = getattr(UpperCAmelCase_, "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) A__ = getattr(UpperCAmelCase_, "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) A__ = getattr(UpperCAmelCase_, "intermediate" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("output" ) A__ = getattr(UpperCAmelCase_, "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) A__ = getattr(UpperCAmelCase_, "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) A__ = getattr(UpperCAmelCase_, "weight" ) 
else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary A__ = ".".join(UpperCAmelCase_ ) if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", UpperCAmelCase_ ) or re.match( r"(\S+)\.attention\.output\.dense\.weight", UpperCAmelCase_ ): A__ = array.reshape(pointer.data.shape ) if "kernel" in full_name: A__ = array.transpose() if pointer.shape == array.shape: A__ = torch.from_numpy(UpperCAmelCase_ ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def _lowerCamelCase ( UpperCAmelCase_ : Any, UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Any ) -> List[str]: """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) A__ = BertConfig.from_json_file(UpperCAmelCase_ ) A__ = BertModel(UpperCAmelCase_ ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict(), UpperCAmelCase_ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) UpperCamelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
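A sketch of driving the converter above programmatically instead of via argparse; `convert_tfa_checkpoint_to_pytorch` is the name the `__main__` block calls, and all three paths are placeholders:

# Hedged usage sketch; paths are placeholders, not real files.
convert_tfa_checkpoint_to_pytorch(
    "checkpoints/bert_tf2/bert_model.ckpt",   # TF 2.x checkpoint path
    "checkpoints/bert_tf2/bert_config.json",  # BERT config json
    "out/pytorch_model.bin",                  # output PyTorch weights file
)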
code_codestyle: 104
from typing import List

from .keymap import KEYMAP, get_character


def lowercase__(A_: str) -> str:
    """simple docstring"""

    def decorator(A_: int):
        __UpperCAmelCase = getattr(A_, """handle_key""", [])
        handle += [key]
        setattr(A_, """handle_key""", A_)
        return func

    return decorator


def lowercase__(*A_: List[str]) -> Optional[int]:
    """simple docstring"""

    def decorator(A_: Tuple):
        __UpperCAmelCase = getattr(A_, """handle_key""", [])
        handle += keys
        setattr(A_, """handle_key""", A_)
        return func

    return decorator


class _A(UpperCamelCase):
    """simple docstring"""

    def __new__(cls: str, __SCREAMING_SNAKE_CASE: Tuple, __SCREAMING_SNAKE_CASE: Tuple, __SCREAMING_SNAKE_CASE: List[str]) -> int:
        __UpperCAmelCase = super().__new__(cls, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        if not hasattr(__SCREAMING_SNAKE_CASE, """key_handler"""):
            setattr(__SCREAMING_SNAKE_CASE, """key_handler""", {})
        setattr(__SCREAMING_SNAKE_CASE, """handle_input""", KeyHandler.handle_input)
        for value in attrs.values():
            __UpperCAmelCase = getattr(__SCREAMING_SNAKE_CASE, """handle_key""", [])
            for key in handled_keys:
                __UpperCAmelCase = value
        return new_cls

    @staticmethod
    def _a(cls: Dict) -> List[Any]:
        __UpperCAmelCase = get_character()
        if char != KEYMAP["undefined"]:
            __UpperCAmelCase = ord(__SCREAMING_SNAKE_CASE)
        __UpperCAmelCase = cls.key_handler.get(__SCREAMING_SNAKE_CASE)
        if handler:
            __UpperCAmelCase = char
            return handler(cls)
        else:
            return None


def lowercase__(cls: str) -> int:
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
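The pattern above pairs a decorator that records which keys a method handles with a metaclass that collects the decorated methods into a dispatch table. A self-contained, de-obfuscated sketch of that pattern (the names `mark` and `KeyHandler` are assumptions standing in for the obfuscated ones):

# Hedged sketch of the decorator + metaclass dispatch pattern.
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator

class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            # collect every method decorated with @mark into the dispatch table
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value
        return new_cls

class Menu(metaclass=KeyHandler):
    @mark("q")
    def quit(self):
        return "quit"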
style_context_codestyle: 68
label: 0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def lowerCamelCase__(__lowerCAmelCase: Dict):
    """simple docstring"""
    lowerCAmelCase_ = 384
    if "tiny" in model_name:
        lowerCAmelCase_ = [3, 3, 9, 3]
        lowerCAmelCase_ = [96, 192, 384, 768]
    if "small" in model_name:
        lowerCAmelCase_ = [3, 3, 27, 3]
        lowerCAmelCase_ = [96, 192, 384, 768]
    if "base" in model_name:
        lowerCAmelCase_ = [3, 3, 27, 3]
        lowerCAmelCase_ = [128, 256, 512, 1024]
        lowerCAmelCase_ = 512
    if "large" in model_name:
        lowerCAmelCase_ = [3, 3, 27, 3]
        lowerCAmelCase_ = [192, 384, 768, 1536]
        lowerCAmelCase_ = 768
    if "xlarge" in model_name:
        lowerCAmelCase_ = [3, 3, 27, 3]
        lowerCAmelCase_ = [256, 512, 1024, 2048]
        lowerCAmelCase_ = 1024

    # set label information
    lowerCAmelCase_ = 150
    lowerCAmelCase_ = "huggingface/label-files"
    lowerCAmelCase_ = "ade20k-id2label.json"
    lowerCAmelCase_ = json.load(open(hf_hub_download(__lowerCAmelCase, __lowerCAmelCase, repo_type="dataset"), "r"))
    lowerCAmelCase_ = {int(__lowerCAmelCase): v for k, v in idalabel.items()}
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    lowerCAmelCase_ = ConvNextConfig(
        depths=__lowerCAmelCase, hidden_sizes=__lowerCAmelCase, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    lowerCAmelCase_ = UperNetConfig(
        backbone_config=__lowerCAmelCase,
        auxiliary_in_channels=__lowerCAmelCase,
        num_labels=__lowerCAmelCase,
        idalabel=__lowerCAmelCase,
        labelaid=__lowerCAmelCase,
    )
    return config


def lowerCamelCase__(__lowerCAmelCase: Dict):
    """simple docstring"""
    lowerCAmelCase_ = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"""))
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"""))
        if i > 0:
            rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight"""))
            rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias"""))
            rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight"""))
            rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias"""))
        rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
        rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys


def lowerCamelCase__(__lowerCAmelCase: Dict, __lowerCAmelCase: List[Any], __lowerCAmelCase: List[str]):
    """simple docstring"""
    lowerCAmelCase_ = dct.pop(__lowerCAmelCase)
    lowerCAmelCase_ = val


def lowerCamelCase__(__lowerCAmelCase: Optional[int], __lowerCAmelCase: str, __lowerCAmelCase: Optional[Any]):
    """simple docstring"""
    lowerCAmelCase_ = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    lowerCAmelCase_ = model_name_to_url[model_name]
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(__lowerCAmelCase, map_location="cpu")["state_dict"]

    lowerCAmelCase_ = get_upernet_config(__lowerCAmelCase)
    lowerCAmelCase_ = UperNetForSemanticSegmentation(__lowerCAmelCase)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(__lowerCAmelCase)
        if "bn" in key:
            lowerCAmelCase_ = key.replace("bn", "batch_norm")
        lowerCAmelCase_ = val

    # rename keys
    lowerCAmelCase_ = create_rename_keys(__lowerCAmelCase)
    for src, dest in rename_keys:
        rename_key(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
    model.load_state_dict(__lowerCAmelCase)

    # verify on image
    lowerCAmelCase_ = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    lowerCAmelCase_ = Image.open(requests.get(__lowerCAmelCase, stream=__lowerCAmelCase).raw).convert("RGB")
    lowerCAmelCase_ = SegformerImageProcessor()
    lowerCAmelCase_ = processor(__lowerCAmelCase, return_tensors="pt").pixel_values

    with torch.no_grad():
        lowerCAmelCase_ = model(__lowerCAmelCase)

    if model_name == "upernet-convnext-tiny":
        lowerCAmelCase_ = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]
        )
    elif model_name == "upernet-convnext-small":
        lowerCAmelCase_ = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]]
        )
    elif model_name == "upernet-convnext-base":
        lowerCAmelCase_ = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]]
        )
    elif model_name == "upernet-convnext-large":
        lowerCAmelCase_ = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        lowerCAmelCase_ = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], __lowerCAmelCase, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(__lowerCAmelCase)
        print(F"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(__lowerCAmelCase)

    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(F"""openmmlab/{model_name}""")
        processor.push_to_hub(F"""openmmlab/{model_name}""")


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    _A = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
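A sketch of calling the converter above directly; `convert_upernet_checkpoint` is the name the `__main__` block calls, arguments are passed positionally because the obfuscated signature hides the parameter names, and the output path is a placeholder:

# Hedged usage sketch; the dump folder is a placeholder.
convert_upernet_checkpoint(
    "upernet-convnext-tiny",            # model_name
    "converted/upernet-convnext-tiny",  # pytorch_dump_folder_path
    False,                              # push_to_hub
)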
code_codestyle: 705
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


_A = get_logger(__name__)

_A = R"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class _lowerCAmelCase:
    @add_start_docstrings(_UpperCamelCase)
    def __call__(self, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called."""
        )


class _lowerCAmelCase:
    @add_start_docstrings(_UpperCamelCase)
    def __call__(self, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called."""
        )


class _lowerCAmelCase(__a):
    @add_start_docstrings(_UpperCamelCase)
    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, **_UpperCamelCase) -> jnp.ndarray:
        for processor in self:
            lowerCAmelCase_ = inspect.signature(processor.__call__).parameters
            if len(_UpperCamelCase) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"""Make sure that all the required parameters: {list(function_args.keys())} for """
                        f"""{processor.__class__} are passed to the logits processor."""
                    )
                lowerCAmelCase_ = processor(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, **_UpperCamelCase)
            else:
                lowerCAmelCase_ = processor(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase)
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase) -> Tuple:
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or not (temperature > 0):
            raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""")
        lowerCAmelCase_ = temperature

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_ = scores / self.temperature
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase=-float("Inf"), _UpperCamelCase=1) -> Union[str, Any]:
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""")
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or (min_tokens_to_keep < 1):
            raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""")
        lowerCAmelCase_ = top_p
        lowerCAmelCase_ = filter_value
        lowerCAmelCase_ = min_tokens_to_keep

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_, lowerCAmelCase_ = lax.top_k(_UpperCamelCase, scores.shape[-1])
        lowerCAmelCase_ = jnp.full_like(_UpperCamelCase, self.filter_value)
        lowerCAmelCase_ = jax.nn.softmax(_UpperCamelCase, axis=-1).cumsum(axis=-1)
        lowerCAmelCase_ = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        lowerCAmelCase_ = jnp.roll(_UpperCamelCase, 1)
        score_mask |= score_mask.at[:, 0].set(_UpperCamelCase)
        # min tokens to keep
        lowerCAmelCase_ = score_mask.at[:, : self.min_tokens_to_keep].set(_UpperCamelCase)
        lowerCAmelCase_ = jnp.where(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase)
        lowerCAmelCase_ = jax.lax.sort_key_val(_UpperCamelCase, _UpperCamelCase)[-1]
        return next_scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase=-float("Inf"), _UpperCamelCase=1) -> List[Any]:
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or top_k <= 0:
            raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""")
        lowerCAmelCase_ = max(_UpperCamelCase, _UpperCamelCase)
        lowerCAmelCase_ = filter_value

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_, lowerCAmelCase_ = scores.shape
        lowerCAmelCase_ = jnp.full(batch_size * vocab_size, self.filter_value)
        lowerCAmelCase_ = min(self.top_k, scores.shape[-1])  # Safety check
        lowerCAmelCase_, lowerCAmelCase_ = lax.top_k(_UpperCamelCase, _UpperCamelCase)
        lowerCAmelCase_ = jnp.broadcast_to((jnp.arange(_UpperCamelCase) * vocab_size)[:, None], (batch_size, topk)).flatten()
        lowerCAmelCase_ = topk_scores.flatten()
        lowerCAmelCase_ = topk_indices.flatten() + shift
        lowerCAmelCase_ = next_scores_flat.at[topk_indices_flat].set(_UpperCamelCase)
        lowerCAmelCase_ = next_scores_flat.reshape(_UpperCamelCase, _UpperCamelCase)
        return next_scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase) -> Any:
        lowerCAmelCase_ = bos_token_id

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_ = jnp.full(scores.shape, -float("inf"))
        lowerCAmelCase_ = 1 - jnp.bool_(cur_len - 1)
        lowerCAmelCase_ = jnp.where(_UpperCamelCase, new_scores.at[:, self.bos_token_id].set(0), _UpperCamelCase)
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase) -> List[str]:
        lowerCAmelCase_ = max_length
        lowerCAmelCase_ = eos_token_id

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_ = jnp.full(scores.shape, -float("inf"))
        lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.max_length + 1)
        lowerCAmelCase_ = jnp.where(_UpperCamelCase, new_scores.at[:, self.eos_token_id].set(0), _UpperCamelCase)
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase) -> Optional[int]:
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or min_length < 0:
            raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""")
        if not isinstance(_UpperCamelCase, _UpperCamelCase) or eos_token_id < 0:
            raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""")
        lowerCAmelCase_ = min_length
        lowerCAmelCase_ = eos_token_id

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        lowerCAmelCase_ = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        lowerCAmelCase_ = jnp.where(_UpperCamelCase, scores.at[:, self.eos_token_id].set(-float("inf")), _UpperCamelCase)
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase) -> int:
        lowerCAmelCase_ = list(_UpperCamelCase)
        lowerCAmelCase_ = begin_index

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> int:
        lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.begin_index)
        lowerCAmelCase_ = jnp.where(_UpperCamelCase, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), _UpperCamelCase)
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase) -> Optional[Any]:
        lowerCAmelCase_ = list(_UpperCamelCase)

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        lowerCAmelCase_ = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase) -> List[Any]:
        lowerCAmelCase_ = dict(_UpperCamelCase)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        lowerCAmelCase_ = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.intaa) * -1
        for index, token in force_token_map.items():
            if token is not None:
                lowerCAmelCase_ = force_token_array.at[index].set(_UpperCamelCase)
        lowerCAmelCase_ = jnp.intaa(_UpperCamelCase)

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> jnp.ndarray:
        def _force_token(_UpperCamelCase):
            lowerCAmelCase_ = scores.shape[0]
            lowerCAmelCase_ = self.force_token_array[generation_idx]
            lowerCAmelCase_ = jnp.ones_like(_UpperCamelCase, dtype=scores.dtype) * -float("inf")
            lowerCAmelCase_ = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            lowerCAmelCase_ = lax.dynamic_update_slice(_UpperCamelCase, _UpperCamelCase, (0, current_token))
            return new_scores

        lowerCAmelCase_ = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(_UpperCamelCase),
                lambda: scores,
            ),
        )
        return scores


class _lowerCAmelCase(__a):
    def __init__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> Tuple:
        lowerCAmelCase_ = generate_config.eos_token_id
        lowerCAmelCase_ = generate_config.no_timestamps_token_id
        lowerCAmelCase_ = generate_config.no_timestamps_token_id + 1
        lowerCAmelCase_ = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(_UpperCamelCase, "max_initial_timestamp_index"):
            lowerCAmelCase_ = generate_config.max_initial_timestamp_index
        else:
            lowerCAmelCase_ = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            lowerCAmelCase_ = model_config.vocab_size

    def __call__(self, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase) -> Optional[Any]:
        # suppress <|notimestamps|> which is handled by without_timestamps
        lowerCAmelCase_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(_UpperCamelCase, _UpperCamelCase):
            lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) >= 1, _UpperCamelCase, _UpperCamelCase)
            lowerCAmelCase_ = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                _UpperCamelCase,
            )
            lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) < 2, _UpperCamelCase, _UpperCamelCase)
            lowerCAmelCase_ = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                _UpperCamelCase,
                _UpperCamelCase,
            )
            return jnp.where(
                _UpperCamelCase,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                _UpperCamelCase,
            )

        lowerCAmelCase_ = jax.vmap(_UpperCamelCase)(_UpperCamelCase, _UpperCamelCase)
        lowerCAmelCase_ = jnp.where(cur_len == self.begin_index, _UpperCamelCase, _UpperCamelCase)
        lowerCAmelCase_ = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            _UpperCamelCase,
        )
        lowerCAmelCase_ = self.timestamp_begin + self.max_initial_timestamp_index
        lowerCAmelCase_ = jnp.where(
            _UpperCamelCase,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            _UpperCamelCase,
        )
        # if sum of probability over timestamps is above any other token, sample timestamp
        lowerCAmelCase_ = jax.nn.log_softmax(_UpperCamelCase, axis=-1)

        def handle_cumulative_probs(_UpperCamelCase, _UpperCamelCase):
            lowerCAmelCase_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            lowerCAmelCase_ = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                _UpperCamelCase,
            )

        lowerCAmelCase_ = jax.vmap(_UpperCamelCase)(_UpperCamelCase, _UpperCamelCase)
        return scores
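The processor-list class above implements a simple fold: each processor maps `(input_ids, scores, cur_len)` to new `scores`, and a list of them is applied left to right. A self-contained sketch of that contract (the function name is illustrative; it mirrors what the list's `__call__` does without the extra-kwargs validation):

# Hedged sketch of the composition contract used by the processor list above.
def run_processors(processors, input_ids, scores, cur_len):
    for processor in processors:
        # same call signature as the __call__ methods above
        scores = processor(input_ids, scores, cur_len)
    return scores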
style_context_codestyle: 279
label: 0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : str , UpperCamelCase : int=7 , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=30 , UpperCamelCase : Tuple=400 , UpperCamelCase : str=True , UpperCamelCase : int=None , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , UpperCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]=1 / 255 , UpperCamelCase : Optional[int]=True , ): '''simple docstring''' __UpperCAmelCase : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : Union[str, Any] = num_channels __UpperCAmelCase : Tuple = min_resolution __UpperCAmelCase : List[Any] = max_resolution __UpperCAmelCase : Any = do_resize __UpperCAmelCase : Optional[Any] = size __UpperCAmelCase : str = do_normalize __UpperCAmelCase : Tuple = image_mean __UpperCAmelCase : Dict = image_std __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : int = rescale_factor __UpperCAmelCase : Optional[int] = do_pad def lowerCamelCase__ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : str=False ): '''simple docstring''' if not batched: __UpperCAmelCase : Any = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): __UpperCAmelCase : Optional[int] = image.size else: __UpperCAmelCase : Optional[int] = image.shape[1], image.shape[2] if w < h: __UpperCAmelCase : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __UpperCAmelCase : Any = self.size["shortest_edge"] elif w > h: __UpperCAmelCase : Optional[int] = self.size["shortest_edge"] __UpperCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h ) else: __UpperCAmelCase : Dict = self.size["shortest_edge"] __UpperCAmelCase : Optional[Any] = self.size["shortest_edge"] else: __UpperCAmelCase : Tuple = [] for image in image_inputs: __UpperCAmelCase : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase : str = max(__lowerCamelCase , key=lambda UpperCamelCase : item[0] )[0] __UpperCAmelCase : List[Any] = max(__lowerCamelCase , key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = DetaImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = DetaImageProcessingTester(self ) @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def 
lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_pad""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """size""" ) ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) __UpperCAmelCase : List[Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Optional[int] = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : int = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Any = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __UpperCAmelCase : Optional[int] = json.loads(f.read() ) __UpperCAmelCase : Optional[Any] = {"image_id": 39_769, "annotations": target} # encode them __UpperCAmelCase : Any = DetaImageProcessor() __UpperCAmelCase : List[Any] = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="""pt""" ) # verify pixel values __UpperCAmelCase : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase ) __UpperCAmelCase : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area __UpperCAmelCase : List[Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) ) # verify boxes __UpperCAmelCase : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase ) __UpperCAmelCase : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id __UpperCAmelCase : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) ) # verify is_crowd __UpperCAmelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) ) # verify class_labels __UpperCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) ) # verify orig_size __UpperCAmelCase : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) ) # verify size __UpperCAmelCase : Union[str, Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) ) @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with 
open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __UpperCAmelCase : Any = json.loads(f.read() ) __UpperCAmelCase : Optional[Any] = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} __UpperCAmelCase : Dict = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __UpperCAmelCase : str = DetaImageProcessor(format="""coco_panoptic""" ) __UpperCAmelCase : Optional[int] = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="""pt""" ) # verify pixel values __UpperCAmelCase : Any = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area __UpperCAmelCase : List[str] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) ) # verify boxes __UpperCAmelCase : List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase ) __UpperCAmelCase : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id __UpperCAmelCase : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) ) # verify is_crowd __UpperCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) ) # verify class_labels __UpperCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) ) # verify masks __UpperCAmelCase : Dict = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __lowerCamelCase ) # verify orig_size __UpperCAmelCase : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) ) # verify size __UpperCAmelCase : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) )
code_codestyle: 139
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class lowerCAmelCase__:
    '''simple docstring'''

    @property
    def _lowerCamelCase(self) -> Tuple:
        return self.get_dummy_input()

    @property
    def _lowerCamelCase(self) -> List[Any]:
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def _lowerCamelCase(self, __lowerCamelCase=True, __lowerCamelCase=False, __lowerCamelCase=False, __lowerCamelCase=False) -> Dict:
        _A: Tuple = 4
        _A: Optional[Any] = 3_2
        _A: Optional[int] = (3_2, 3_2)
        _A: Dict = torch.manual_seed(0)
        _A: List[Any] = torch.device(__lowerCamelCase)
        _A: Union[str, Any] = (batch_size, num_channels) + sizes
        _A: int = randn_tensor(__lowerCamelCase, generator=__lowerCamelCase, device=__lowerCamelCase)
        _A: Optional[int] = {"hidden_states": hidden_states}
        if include_temb:
            _A: Dict = 1_2_8
            _A: Any = randn_tensor((batch_size, temb_channels), generator=__lowerCamelCase, device=__lowerCamelCase)
        if include_res_hidden_states_tuple:
            _A: str = torch.manual_seed(1)
            _A: str = (randn_tensor(__lowerCamelCase, generator=__lowerCamelCase, device=__lowerCamelCase),)
        if include_encoder_hidden_states:
            _A: Any = floats_tensor((batch_size, 3_2, 3_2)).to(__lowerCamelCase)
        if include_skip_sample:
            _A: List[Any] = randn_tensor(((batch_size, 3) + sizes), generator=__lowerCamelCase, device=__lowerCamelCase)
        return dummy_input

    def _lowerCamelCase(self) -> Optional[Any]:
        _A: int = {
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            _A: Optional[Any] = 3_2
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        _A: Optional[Any] = self.dummy_input
        return init_dict, inputs_dict

    def _lowerCamelCase(self, __lowerCamelCase) -> Dict:
        _A, _A: Optional[int] = self.prepare_init_args_and_inputs_for_common()
        _A: int = self.block_class(**__lowerCamelCase)
        unet_block.to(__lowerCamelCase)
        unet_block.eval()
        with torch.no_grad():
            _A: Any = unet_block(**__lowerCamelCase)
        if isinstance(__lowerCamelCase, __lowerCamelCase):
            _A: Optional[Any] = output[0]
        self.assertEqual(output.shape, self.output_shape)
        _A: Optional[int] = output[0, -1, -3:, -3:]
        _A: Dict = torch.tensor(__lowerCamelCase).to(__lowerCamelCase)
        assert torch_all_close(output_slice.flatten(), __lowerCamelCase, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def _lowerCamelCase(self) -> Dict:
        _A, _A: Optional[int] = self.prepare_init_args_and_inputs_for_common()
        _A: Optional[int] = self.block_class(**__lowerCamelCase)
        model.to(__lowerCamelCase)
        model.train()
        _A: Tuple = model(**__lowerCamelCase)
        if isinstance(__lowerCamelCase, __lowerCamelCase):
            _A: List[Any] = output[0]
        _A: Any = torch.device(__lowerCamelCase)
        _A: Any = randn_tensor(output.shape, device=__lowerCamelCase)
        _A: Optional[int] = torch.nn.functional.mse_loss(__lowerCamelCase, __lowerCamelCase)
        loss.backward()
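A sketch of how a concrete block test plugs into the mixin above: a subclass pins `block_class` and `block_type` (both referenced in the mixin's methods) and inherits the dummy-input helpers. The mixin name `UNetBlockTesterMixin` and the imported example block are assumptions for illustration:

# Hedged sketch; mixin name and import path are assumed, not confirmed by the dump.
import unittest
from diffusers.models.unet_2d_blocks import UNetMidBlock2D

class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D
    block_type = "mid"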
503
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : int = ["image_processor", "tokenizer"] lowerCAmelCase : Optional[int] = "FlavaImageProcessor" lowerCAmelCase : Tuple = ("BertTokenizer", "BertTokenizerFast") def __init__( self : str , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Tuple ) ->str: '''simple docstring''' _UpperCAmelCase : str = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __A , ) _UpperCAmelCase : Tuple = kwargs.pop("feature_extractor" ) _UpperCAmelCase : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__A , __A ) _UpperCAmelCase : int = self.image_processor def __call__( self : Any , lowerCamelCase__ : Optional[ImageInput] = None , lowerCamelCase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : List[Any] , ) ->Any: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." 
) if text is not None: _UpperCAmelCase : Dict = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) if images is not None: _UpperCAmelCase : Any = self.image_processor( __A , return_image_mask=__A , return_codebook_pixels=__A , return_tensors=__A , **__A , ) if text is not None and images is not None: encoding.update(__A ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Union[str, Any] ) ->str: '''simple docstring''' return self.tokenizer.batch_decode(*__A , **__A ) def lowerCAmelCase__ ( self : Union[str, Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->Union[str, Any]: '''simple docstring''' return self.tokenizer.decode(*__A , **__A ) @property def lowerCAmelCase__ ( self : Union[str, Any] ) ->str: '''simple docstring''' _UpperCAmelCase : str = self.tokenizer.model_input_names _UpperCAmelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , ) return self.image_processor_class @property def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , ) return self.image_processor
717
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]: '''simple docstring''' _UpperCAmelCase : str = parent _UpperCAmelCase : Optional[int] = batch_size _UpperCAmelCase : List[Any] = seq_length _UpperCAmelCase : Dict = is_training _UpperCAmelCase : int = use_attention_mask _UpperCAmelCase : List[Any] = use_token_type_ids _UpperCAmelCase : int = use_labels _UpperCAmelCase : str = vocab_size _UpperCAmelCase : Tuple = hidden_size _UpperCAmelCase : Dict = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : List[Any] = hidden_act _UpperCAmelCase : Union[str, Any] = hidden_dropout_prob _UpperCAmelCase : List[str] = attention_probs_dropout_prob _UpperCAmelCase : Optional[int] = max_position_embeddings _UpperCAmelCase : Tuple = type_vocab_size _UpperCAmelCase : int = type_sequence_label_size _UpperCAmelCase : List[str] = initializer_range _UpperCAmelCase : Union[str, Any] = num_choices def lowerCAmelCase__ ( self : int ) ->Any: '''simple docstring''' _UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Any = None if self.use_attention_mask: _UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : int = None if self.use_token_type_ids: _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : Tuple = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase__ ( self : Dict ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : List[Any] 
= self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs _UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def lowerCAmelCase__ ( self : int ) ->Dict: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs _UpperCAmelCase : List[Any] = True _UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCAmelCase : Tuple = True lowerCAmelCase : Tuple = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowerCAmelCase__ ( self : Optional[int] ) ->int: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) _UpperCAmelCase : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self : Tuple ) ->str: '''simple docstring''' _UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) _UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) _UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0] _UpperCAmelCase : int = [1, 11, 5_02_65] self.assertEqual(list(output.shape ) , lowerCamelCase__ ) # compare the actual values for a slice. _UpperCAmelCase : int = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict: '''simple docstring''' _UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ ) _UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) _UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0] # compare the actual values for a slice. _UpperCAmelCase : str = np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
40
0
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = {"facebook/bart-base": BartForConditionalGeneration} lowerCamelCase__ = {"facebook/bart-base": BartTokenizer} def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : List[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" ,type=lowercase_ ,default=lowercase_ ,help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" ,type=lowercase_ ,default=5 ,help="The maximum total input sequence length after tokenization." ,) parser.add_argument( "--num_beams" ,type=lowercase_ ,default=lowercase_ ,help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) ,) parser.add_argument( "--model_name_or_path" ,type=lowercase_ ,help="Path to pretrained model or model identifier from huggingface.co/models." ,required=lowercase_ ,) parser.add_argument( "--config_name" ,type=lowercase_ ,default=lowercase_ ,help="Pretrained config name or path if not the same as model_name" ,) parser.add_argument( "--device" ,type=lowercase_ ,default="cpu" ,help="Device where the model will be run" ,) parser.add_argument("--output_file_path" ,type=lowercase_ ,default=lowercase_ ,help="Where to store the final ONNX file." ) _UpperCamelCase : Optional[int] = parser.parse_args() return args def lowercase__ ( lowercase_ ,lowercase_="cpu" ) -> Any: """simple docstring""" _UpperCamelCase : Optional[Any] = model_dict[model_name].from_pretrained(lowercase_ ).to(lowercase_ ) _UpperCamelCase : Dict = tokenizer_dict[model_name].from_pretrained(lowercase_ ) if model_name in ["facebook/bart-base"]: _UpperCamelCase : Dict = 0 _UpperCamelCase : Dict = None _UpperCamelCase : List[str] = 0 return huggingface_model, tokenizer def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> List[Any]: """simple docstring""" model.eval() _UpperCamelCase : List[Any] = None _UpperCamelCase : Optional[int] = torch.jit.script(BARTBeamSearchGenerator(lowercase_ ) ) with torch.no_grad(): _UpperCamelCase : Optional[Any] = "My friends are cool but they eat too many carbs." 
_UpperCamelCase : Any = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1_024 ,return_tensors="pt" ).to(model.device ) _UpperCamelCase : Optional[int] = model.generate( inputs["input_ids"] ,attention_mask=inputs["attention_mask"] ,num_beams=lowercase_ ,max_length=lowercase_ ,early_stopping=lowercase_ ,decoder_start_token_id=model.config.decoder_start_token_id ,) torch.onnx.export( lowercase_ ,( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) ,lowercase_ ,opset_version=14 ,input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] ,output_names=["output_ids"] ,dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } ,example_outputs=lowercase_ ,) logger.info("Model exported to {}".format(lowercase_ ) ) _UpperCamelCase : str = remove_dup_initializers(os.path.abspath(lowercase_ ) ) logger.info("Deduplicated and optimized model written to {}".format(lowercase_ ) ) _UpperCamelCase : Union[str, Any] = onnxruntime.InferenceSession(lowercase_ ) _UpperCamelCase : Optional[Any] = ort_sess.run( lowercase_ ,{ "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(lowercase_ ), "max_length": np.array(lowercase_ ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } ,) np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def lowercase__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase : str = parse_args() _UpperCamelCase : Union[str, Any] = 5 _UpperCamelCase : List[str] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _UpperCamelCase : Optional[int] = torch.device(args.device ) _UpperCamelCase, _UpperCamelCase : Optional[int] = load_model_tokenizer(args.model_name_or_path ,lowercase_ ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(lowercase_ ) if args.max_length: _UpperCamelCase : Any = args.max_length if args.num_beams: _UpperCamelCase : List[Any] = args.num_beams if args.output_file_path: _UpperCamelCase : int = args.output_file_path else: _UpperCamelCase : int = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) if __name__ == "__main__": main()
624
"""simple docstring""" import collections import os import re from pathlib import Path lowerCamelCase__ = "src/transformers" # Matches is_xxx_available() lowerCamelCase__ = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowerCamelCase__ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCamelCase__ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowerCamelCase__ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowerCamelCase__ = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCamelCase__ = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowerCamelCase__ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowerCamelCase__ = re.compile(R"^\s*try:") # Catches a line with else: lowerCamelCase__ = re.compile(R"^\s*else:") def lowercase__ ( lowercase_ ) -> Optional[int]: """simple docstring""" if _re_test_backend.search(lowercase_ ) is None: return None _UpperCamelCase : Optional[Any] = [b[0] for b in _re_backend.findall(lowercase_ )] backends.sort() return "_and_".join(lowercase_ ) def lowercase__ ( lowercase_ ) -> Dict: """simple docstring""" with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f: _UpperCamelCase : Optional[Any] = f.readlines() _UpperCamelCase : str = 0 while line_index < len(lowercase_ ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase_ ): return None # First grab the objects without a specific backend in _import_structure _UpperCamelCase : str = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: _UpperCamelCase : Any = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase_ ): _UpperCamelCase : Tuple = _re_one_line_import_struct.search(lowercase_ ).groups()[0] _UpperCamelCase : Any = re.findall(r"\[([^\]]+)\]" ,lowercase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue _UpperCamelCase : str = _re_import_struct_key_value.search(lowercase_ ) if single_line_import_search is not None: _UpperCamelCase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 _UpperCamelCase : str = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): _UpperCamelCase : Dict = lines[line_index] if _re_import_struct_add_one.search(lowercase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowercase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase_ ) is not None: _UpperCamelCase : int = _re_import_struct_add_many.search(lowercase_ ).groups()[0].split(", " ) _UpperCamelCase : str = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_between_brackets.search(lowercase_ ) is not None: _UpperCamelCase : Optional[Any] = _re_between_brackets.search(lowercase_ ).groups()[0].split(", " ) _UpperCamelCase : Any = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0] objects.extend(lowercase_ ) elif _re_quote_object.search(lowercase_ ) is not None: objects.append(_re_quote_object.search(lowercase_ ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 _UpperCamelCase : List[str] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _UpperCamelCase : Optional[Any] = [] while ( line_index < len(lowercase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): _UpperCamelCase : List[Any] = lines[line_index] _UpperCamelCase : Union[str, Any] = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 _UpperCamelCase : List[str] = {"none": objects} # Let's continue with backend-specific objects while line_index < len(lowercase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
_UpperCamelCase : Any = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): _UpperCamelCase : str = lines[line_index] _UpperCamelCase : Optional[int] = _re_import.search(lowercase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 _UpperCamelCase : Dict = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]: """simple docstring""" def find_duplicates(lowercase_ ): return [k for k, v in collections.Counter(lowercase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _UpperCamelCase : Optional[Any] = [] for key in import_dict_objects.keys(): _UpperCamelCase : Optional[int] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _UpperCamelCase : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _UpperCamelCase : Tuple = "base imports" if key == "none" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def lowercase__ ( ) -> str: """simple docstring""" _UpperCamelCase : Tuple = [] for root, _, files in os.walk(lowercase_ ): if "__init__.py" in files: _UpperCamelCase : Dict = os.path.join(lowercase_ ,"__init__.py" ) _UpperCamelCase : str = parse_init(lowercase_ ) if objects is not None: _UpperCamelCase : Optional[Any] = analyze_results(*lowercase_ ) if len(lowercase_ ) > 0: _UpperCamelCase : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("\n".join(lowercase_ ) ) if len(lowercase_ ) > 0: raise ValueError("\n\n".join(lowercase_ ) ) def lowercase__ ( ) -> Dict: """simple docstring""" _UpperCamelCase : str = [] for path, directories, files in os.walk(lowercase_ ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(lowercase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase_ ) / folder).glob("*.py" ) ) ) == 0: continue _UpperCamelCase : str = str((Path(lowercase_ ) / folder).relative_to(lowercase_ ) ) _UpperCamelCase : int = short_path.replace(os.path.sep ,"." 
) submodules.append(lowercase_ ) for fname in files: if fname == "__init__.py": continue _UpperCamelCase : List[Any] = str((Path(lowercase_ ) / fname).relative_to(lowercase_ ) ) _UpperCamelCase : Optional[int] = short_path.replace(".py" ,"" ).replace(os.path.sep ,"." ) if len(submodule.split("." ) ) == 1: submodules.append(lowercase_ ) return submodules lowerCamelCase__ = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def lowercase__ ( ) -> Tuple: """simple docstring""" from transformers.utils import direct_transformers_import _UpperCamelCase : List[str] = direct_transformers_import(lowercase_ ) _UpperCamelCase : Any = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowercase_ ,"__init__.py" ) ,"r" ) as f: _UpperCamelCase : Union[str, Any] = f.read() import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" ,lowercase_ ) ) ) _UpperCamelCase : Optional[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowercase_ ) > 0: _UpperCamelCase : List[str] = "\n".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( "The following submodules are not properly registed in the main init of Transformers:\n" F'''{list_of_modules}\n''' "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
624
1
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _lowerCamelCase : Optional[Any] = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Tuple = [] _lowerCamelCase : List[str] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} _lowerCamelCase : Optional[Any] = [ { "type": "header", "text": { "type": "plain_text", "text": F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''', "emoji": True, }, } ] _lowerCamelCase : str = 0 for log in Path().glob("*.log"): _lowerCamelCase : str = 0 with open(log, "r") as f: for line in f: _lowerCamelCase : Optional[Any] = json.loads(line) if line.get("nodeid", "") != "": _lowerCamelCase : str = line["nodeid"] if line.get("duration", None) is not None: _lowerCamelCase : str = F'''{line['duration']:.4f}''' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _lowerCamelCase : Optional[int] = [] log.unlink() _lowerCamelCase : str = "" _lowerCamelCase : Optional[Any] = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" _lowerCamelCase : Any = [] _lowerCamelCase : Dict = {} for test in failed_tests: _lowerCamelCase : Union[str, Any] = test[0].split("::") _lowerCamelCase : str = data[0].split("/")[-1] if data[0] not in filesafailed: _lowerCamelCase : List[str] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) _lowerCamelCase : List[Any] = [test[0] for test in failed_table] _lowerCamelCase : Optional[Any] = list(set(files)) # Count number of instances in failed_tests _lowerCamelCase : Union[str, Any] = [] for file in individual_files: table.append([file, len(filesafailed[file])]) _lowerCamelCase : Optional[Any] = tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: _lowerCamelCase : List[str] = "Too many failed tests, please see the full report in the Action results." _lowerCamelCase : Tuple = len(err) + 1_0 _lowerCamelCase : Union[str, Any] = message[: 3_0_0_0 - offset] + F'''\n...\n```\n{err}''' print(F'''### {message}''') else: _lowerCamelCase : List[str] = "No failed tests! 🤗" print(F'''## {message}''') payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient _lowerCamelCase : Optional[int] = WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": _lowerCamelCase : int = { "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) _lowerCamelCase : List[Any] = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } payload.append(action_button) _lowerCamelCase : Union[str, Any] = { "type": "context", "elements": [ { "type": "plain_text", "text": F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''', } ], } payload.append(date_report) _lowerCamelCase : int = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) _lowerCamelCase : List[Any] = response.data["ts"] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _lowerCamelCase : Dict = "" for i, row in enumerate(test_failures): if row[0] != test_class: _lowerCamelCase : Optional[int] = row[0] else: _lowerCamelCase : List[str] = "" _lowerCamelCase : int = { "type": "section", "text": { "type": "mrkdwn", "text": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''', }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
196
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : Harris free parameter, typically 0.04 - 0.06
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Use the configured k; the original hardcoded 0.04 here, which
                # silently ignored the value passed to the constructor.
                r = det - self.k * (trace**2)
                if r > 0.5:  # response threshold; can be tuned
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
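# Usage sketch (editor's addition): runs the detector on a synthetic
# checkerboard so no external image file is needed. Assumes only cv2 and
# numpy, both imported above; the file name "checkerboard.png" is arbitrary.
if __name__ == "__main__":
    board = (np.kron(np.indices((8, 8)).sum(axis=0) % 2, np.ones((8, 8))) * 255).astype(np.uint8)
    cv2.imwrite("checkerboard.png", board)
    detector = HarrisCorner(0.04, 3)
    _, corners = detector.detect("checkerboard.png")
    print(f"found {len(corners)} corner responses above the threshold")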
196
1
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via math.sqrt (fast, but float-based)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search (exact integer math)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
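# Usage sketch (editor's addition): cross-checks the two predicates above on a
# handful of values. The float-based check can drift for very large inputs, so
# the binary-search variant is the safer reference; `round(n**0.5) ** 2 == n`
# serves as an independent oracle here.
if __name__ == "__main__":
    for n in (0, 1, 16, 26, 144, 145, 10**10):
        assert perfect_square_binary_search(n) == (round(n**0.5) ** 2 == n)
    print("all perfect-square checks passed")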
412
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
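# Usage sketch (editor's addition): the derived properties follow from the
# defaults above -- head_dim is 4544 // 71 == 64, and rotary embeddings are in
# use whenever ALiBi is disabled. Shown as comments since this module only
# runs inside the transformers package (relative imports).
#
#   config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
#   assert config.head_dim == 64
#   assert config.rotary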
18
0
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ): def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Union[str, Any] = tempfile.mkdtemp() a_ : Union[str, Any] = 8 # DPR tok a_ : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok a_ : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] a_ : Optional[int] = {'''unk_token''': '''<unk>'''} a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def 
SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: a_ : str = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : List[str] = self.get_dummy_dataset() a_ : Tuple = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: a_ : Tuple = dataset a_ : Any = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict: a_ : Dict = self.get_dummy_dataset() a_ : Dict = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' ) a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset a_ : int = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: a_ : Optional[Any] = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , ) return retriever def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: a_ : str = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) ) a_ : Optional[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) a_ : int = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : Optional[Any] = 1 a_ : Dict = self.get_dummy_canonical_hf_index_retriever() a_ : Tuple = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : str = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: a_ : List[str] = self.get_dummy_dataset() retriever.save_pretrained(__SCREAMING_SNAKE_CASE ) a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a_ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Union[str, Any] = 1 a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE ) a_ : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__SCREAMING_SNAKE_CASE ) a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a_ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: a_ : Union[str, Any] = 1 a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE ) a_ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) 
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__SCREAMING_SNAKE_CASE ) a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a_ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: a_ : str = 1 a_ : Tuple = self.get_dummy_legacy_index_retriever() a_ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: a_ : List[str] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__SCREAMING_SNAKE_CASE ) a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a_ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: import torch a_ : Any = 1 a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever() a_ : Union[str, Any] = [[5, 7], [10, 11]] a_ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE ) a_ , a_ , a_ : List[str] = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) a_ : Any = retriever( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) a_ , a_ , a_ , a_ : str = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : str = self.get_dpr_ctx_encoder_tokenizer() a_ : Tuple = 1 a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE ) retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE ) a_ : Dict = [[5, 7], [10, 11]] a_ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE ) self.assertEqual( len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
666
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
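# Usage sketch (editor's addition): encode_example() flattens a per-language
# dict into parallel, (language, text)-sorted tuples, repeating the language
# code when one language carries several translations. Shown as comments since
# this module only runs inside the datasets package (relative imports).
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {'language': ('en', 'fr', 'fr'),
#   #     'translation': ('the cat', 'la chatte', 'le chat')}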
666
1
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar everywhere except on the local main process.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
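# Usage sketch (editor's addition): under a multi-process launch only local
# rank 0 renders the bar; the call signature is otherwise identical to
# `tqdm.tqdm`. Shown as comments since it needs a distributed context to run.
#
#   from accelerate.utils import tqdm
#
#   for batch in tqdm(dataloader, desc="training"):
#       ...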
452
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
452
1
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word was previously inserted."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
634
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial through the points (1, y0), (2, y1), ..."""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to 1..order points."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
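# Worked check (assuming the helpers above), mirroring the cube example from
# the Project Euler 101 statement: for u(n) = n**3 the first incorrect terms
# of the optimum polynomials fitted to 1, 2 and 3 points are 1, 15 and 58.
def cube(n: int) -> int:
    return n**3

assert solution(cube, order=3) == 1 + 15 + 58  # == 74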
634
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig class __A ( A ): '''simple docstring''' __lowerCamelCase : Any = 'bert-generation' def __init__(self , A=50_358 , A=1_024 , A=24 , A=16 , A=4_096 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=0.02 , A=1E-12 , A=0 , A=2 , A=1 , A="absolute" , A=True , **A , ) -> int: """simple docstring""" super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = initializer_range _a = layer_norm_eps _a = position_embedding_type _a = use_cache
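# Usage sketch (assuming transformers is installed): the upstream counterpart
# of the config class above is BertGenerationConfig, consumed e.g. by
# BertGenerationEncoder.
from transformers import BertGenerationConfig, BertGenerationEncoder

config = BertGenerationConfig(
    hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024
)
model = BertGenerationEncoder(config)
print(model.config.hidden_size)  # 256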
11
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: __snake_case = None try: import msvcrt except ImportError: __snake_case = None try: import fcntl except ImportError: __snake_case = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __snake_case = OSError # Data # ------------------------------------------------ __snake_case = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] __snake_case = '3.0.12' __snake_case = None def _lowerCamelCase ( ): global _logger lowercase__ : Tuple = _logger or logging.getLogger(__name__ ) return _logger class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowerCamelCase__ ) -> Optional[int]: lowercase__ : Union[str, Any] = lock_file return None def __str__( self ) -> List[Any]: lowercase__ : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class _SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , lowerCamelCase__ ) -> Optional[Any]: lowercase__ : str = lock return None def __enter__( self ) -> List[Any]: return self.lock def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: self.lock.release() return None class _SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> Optional[Any]: lowercase__ : List[Any] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long lowercase__ : Union[str, Any] = self.hash_filename_if_too_long(lowerCamelCase__ , lowerCamelCase__ ) # The path to the lock file. lowercase__ : int = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. lowercase__ : Dict = None # The default timeout value. lowercase__ : Optional[Any] = timeout # We use this lock primarily for the lock counter. lowercase__ : Optional[int] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. lowercase__ : Union[str, Any] = 0 return None @property def UpperCAmelCase__( self ) -> List[str]: return self._lock_file @property def UpperCAmelCase__( self ) -> Union[str, Any]: return self._timeout @timeout.setter def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]: lowercase__ : Union[str, Any] = float(lowerCamelCase__ ) return None def UpperCAmelCase__( self ) -> Tuple: raise NotImplementedError() def UpperCAmelCase__( self ) -> Tuple: raise NotImplementedError() @property def UpperCAmelCase__( self ) -> str: return self._lock_file_fd is not None def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=0.05 ) -> List[str]: # Use the default timeout, if no timeout is provided. if timeout is None: lowercase__ : int = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 lowercase__ : Tuple = id(self ) lowercase__ : Any = self._lock_file lowercase__ : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(lowerCamelCase__ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: lowercase__ : Any = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__( self , lowerCamelCase__=False ) -> int: with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: lowercase__ : Tuple = id(self ) lowercase__ : int = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() lowercase__ : str = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self ) -> Dict: self.acquire() return self def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: self.release() return None def __del__( self ) -> int: self.release(force=lowerCamelCase__ ) return None def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> str: lowercase__ : Optional[int] = os.path.basename(lowerCamelCase__ ) if len(lowerCamelCase__ ) > max_length and max_length > 0: lowercase__ : Union[str, Any] = os.path.dirname(lowerCamelCase__ ) lowercase__ : List[Any] = str(hash(lowerCamelCase__ ) ) lowercase__ : Optional[int] = filename[: max_length - len(lowerCamelCase__ ) - 8] + """...""" + hashed_filename + """.lock""" return os.path.join(lowerCamelCase__ , lowerCamelCase__ ) else: return path class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> Tuple: from .file_utils import relative_to_absolute_path super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ ) lowercase__ : List[Any] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__( self ) -> Tuple: lowercase__ : Union[str, Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: lowercase__ : Dict = os.open(self._lock_file , lowerCamelCase__ ) except OSError: pass else: try: msvcrt.locking(lowerCamelCase__ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(lowerCamelCase__ ) else: lowercase__ : Optional[Any] = fd return None def UpperCAmelCase__( self ) -> List[Any]: lowercase__ : int = self._lock_file_fd lowercase__ : Any = None msvcrt.locking(lowerCamelCase__ , msvcrt.LK_UNLCK , 1 ) os.close(lowerCamelCase__ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> List[str]: lowercase__ : Optional[Any] = os.statvfs(os.path.dirname(lowerCamelCase__ ) ).f_namemax super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ ) def UpperCAmelCase__( self ) -> str: lowercase__ : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC lowercase__ : List[Any] = os.open(self._lock_file , lowerCamelCase__ ) try: fcntl.flock(lowerCamelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(lowerCamelCase__ ) else: lowercase__ : Any = fd return None def UpperCAmelCase__( self ) -> str: # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition lowercase__ : Optional[int] = self._lock_file_fd lowercase__ : Optional[Any] = None fcntl.flock(lowerCamelCase__ , fcntl.LOCK_UN ) os.close(lowerCamelCase__ ) return None class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: lowercase__ : Any = os.open(self._lock_file , lowerCamelCase__ ) except OSError: pass else: lowercase__ : Union[str, Any] = fd return None def UpperCAmelCase__( self ) -> Tuple: os.close(self._lock_file_fd ) lowercase__ : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __snake_case = None if msvcrt: __snake_case = WindowsFileLock elif fcntl: __snake_case = UnixFileLock else: __snake_case = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
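# A minimal usage sketch for the module above: FileLock resolves to the
# platform-appropriate class (msvcrt, fcntl, or the soft fallback), and
# Timeout is raised when the lock cannot be acquired in time. The file names
# here are illustrative.
lock = FileLock("shared_resource.txt.lock", timeout=5)
try:
    with lock:
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("Another process is holding the lock.")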
200
0
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset SCREAMING_SNAKE_CASE_ = random.Random() def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=1.0, SCREAMING_SNAKE_CASE__=None, SCREAMING_SNAKE_CASE__=None ) -> Tuple: if rng is None: a_ : Any = global_rng a_ : Dict = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case_ ( unittest.TestCase ): def __init__( self , a_ , a_=7 , a_=4_0_0 , a_=2_0_0_0 , a_=2_0_4_8 , a_=1_2_8 , a_=1 , a_=5_1_2 , a_=3_0 , a_=4_4_1_0_0 , ): a_ : Optional[Any] = parent a_ : Tuple = batch_size a_ : Union[str, Any] = min_seq_length a_ : Dict = max_seq_length a_ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) a_ : int = spectrogram_length a_ : Optional[Any] = feature_size a_ : Optional[int] = num_audio_channels a_ : List[str] = hop_length a_ : Optional[int] = chunk_length a_ : List[str] = sampling_rate def snake_case_ ( self ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def snake_case_ ( self , a_=False , a_=False ): def _flatten(a_ ): return list(itertools.chain(*a_ ) ) if equal_length: a_ : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size a_ : Optional[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: a_ : Tuple = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case_ ( a_ ,unittest.TestCase ): __lowerCAmelCase = TvltFeatureExtractor def snake_case_ ( self ): a_ : Dict = TvltFeatureExtractionTester(self ) def snake_case_ ( self ): a_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(a_ , "spectrogram_length" ) ) self.assertTrue(hasattr(a_ , "feature_size" ) ) self.assertTrue(hasattr(a_ , "num_audio_channels" ) ) self.assertTrue(hasattr(a_ , "hop_length" ) ) self.assertTrue(hasattr(a_ , "chunk_length" ) ) self.assertTrue(hasattr(a_ , "sampling_rate" ) ) def snake_case_ ( self ): a_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a_ : List[Any] = feat_extract_first.save_pretrained(a_ )[0] check_json_file_has_correct_format(a_ ) a_ : Tuple = self.feature_extraction_class.from_pretrained(a_ ) a_ : Dict = feat_extract_first.to_dict() a_ : List[str] = feat_extract_second.to_dict() a_ : List[Any] = dict_first.pop("mel_filters" ) a_ : str = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def snake_case_ ( self ): a_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as 
tmpdirname: a_ : Dict = os.path.join(a_ , "feat_extract.json" ) feat_extract_first.to_json_file(a_ ) a_ : str = self.feature_extraction_class.from_json_file(a_ ) a_ : Dict = feat_extract_first.to_dict() a_ : str = feat_extract_second.to_dict() a_ : int = dict_first.pop("mel_filters" ) a_ : List[Any] = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def snake_case_ ( self ): # Initialize feature_extractor a_ : str = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 a_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] a_ : Union[str, Any] = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input a_ : Dict = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched a_ : int = feature_extractor(a_ , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking a_ : Optional[int] = feature_extractor( a_ , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=a_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. a_ : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] a_ : Union[str, Any] = np.asarray(a_ ) a_ : Dict = feature_extractor(a_ , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def snake_case_ ( self , a_ ): a_ : Optional[int] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech a_ : List[str] = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def snake_case_ ( self ): a_ : List[str] = self._load_datasamples(1 ) a_ : Any = TvltFeatureExtractor() a_ : Union[str, Any] = feature_extractor(a_ , return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) a_ : Dict = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1e-4 ) )
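# A small usage sketch for the feature extractor exercised by the tests above
# (assuming a transformers build that ships TVLT): raw audio in, spectrogram
# patches out.
import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
audio = np.random.randn(44_100).astype(np.float32)  # one second at 44.1 kHz
features = feature_extractor(audio, sampling_rate=44_100, return_tensors="np")
print(features.audio_values.shape)  # (batch, num_audio_channels, time, feature_size)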
370
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class snake_case_ ( a_ ): __lowerCAmelCase = "blenderbot-small" __lowerCAmelCase = ["past_key_values"] __lowerCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , a_=5_0_2_6_5 , a_=5_1_2 , a_=8 , a_=2_0_4_8 , a_=1_6 , a_=8 , a_=2_0_4_8 , a_=1_6 , a_=0.0 , a_=0.0 , a_=True , a_=True , a_="gelu" , a_=5_1_2 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.02 , a_=1 , a_=False , a_=0 , a_=1 , a_=2 , a_=2 , **a_ , ): a_ : int = vocab_size a_ : Any = max_position_embeddings a_ : Optional[int] = d_model a_ : Tuple = encoder_ffn_dim a_ : List[Any] = encoder_layers a_ : Optional[int] = encoder_attention_heads a_ : Optional[int] = decoder_ffn_dim a_ : List[str] = decoder_layers a_ : Dict = decoder_attention_heads a_ : List[str] = dropout a_ : List[Any] = attention_dropout a_ : List[str] = activation_dropout a_ : Optional[Any] = activation_function a_ : List[Any] = init_std a_ : int = encoder_layerdrop a_ : Optional[int] = decoder_layerdrop a_ : List[str] = use_cache a_ : Optional[int] = encoder_layers a_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , forced_eos_token_id=a_ , **a_ , ) class snake_case_ ( a_ ): @property def snake_case_ ( self ): if self.task in ["default", "seq2seq-lm"]: a_ : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: a_ : Tuple = {0: "batch"} a_ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: a_ : List[str] = {0: "batch", 1: "decoder_sequence"} a_ : Optional[int] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(a_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
a_ : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: a_ , a_ : Optional[int] = self.num_layers for i in range(a_ ): a_ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} a_ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} else: a_ : List[str] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def snake_case_ ( self ): if self.task in ["default", "seq2seq-lm"]: a_ : List[Any] = super().outputs else: a_ : Tuple = super(a_ , self ).outputs if self.use_past: a_ , a_ : Dict = self.num_layers for i in range(a_ ): a_ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} a_ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def snake_case_ ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): a_ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) # Generate decoder inputs a_ : Optional[int] = seq_length if not self.use_past else 1 a_ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) a_ : int = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} a_ : Tuple = dict(**a_ , **a_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch a_ , a_ : Optional[int] = common_inputs["input_ids"].shape a_ : str = common_inputs["decoder_input_ids"].shape[1] a_ , a_ : Dict = self.num_attention_heads a_ : List[str] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a_ : Optional[Any] = decoder_seq_length + 3 a_ : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a_ : Dict = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(a_ , a_ )] , dim=1 ) a_ : Optional[int] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a_ , a_ : Tuple = self.num_layers a_ : str = min(a_ , a_ ) a_ : Dict = max(a_ , a_ ) - min_num_layers a_ : Union[str, Any] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(a_ ): common_inputs["past_key_values"].append( ( torch.zeros(a_ ), torch.zeros(a_ ), torch.zeros(a_ ), torch.zeros(a_ ), ) ) # TODO: test this. a_ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(a_ , a_ ): common_inputs["past_key_values"].append((torch.zeros(a_ ), torch.zeros(a_ )) ) return common_inputs def snake_case_ ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): a_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , a_ , a_ , a_ , a_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch a_ , a_ : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values a_ : int = seqlen + 2 a_ , a_ : Optional[int] = self.num_layers a_ , a_ : Optional[int] = self.num_attention_heads a_ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a_ : str = common_inputs["attention_mask"].dtype a_ : Tuple = torch.cat( [common_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 ) a_ : Optional[int] = [ (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(a_ ) ] return common_inputs def snake_case_ ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a_ : Optional[Any] = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a_ : Tuple = tokenizer.num_special_tokens_to_add(a_ ) a_ : Union[str, Any] = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ ) # Generate dummy inputs according to compute batch and sequence a_ : Union[str, Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size a_ : str = dict(tokenizer(a_ , return_tensors=a_ ) ) return common_inputs def snake_case_ ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ): if self.task in ["default", "seq2seq-lm"]: a_ : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) elif self.task == "causal-lm": a_ : List[Any] = self._generate_dummy_inputs_for_causal_lm( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) else: a_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) return common_inputs def snake_case_ ( self , a_ , a_ , a_ , a_ ): if self.task in ["default", "seq2seq-lm"]: a_ : Optional[int] = super()._flatten_past_key_values_(a_ , a_ , a_ , a_ ) else: a_ : int = super(a_ , self )._flatten_past_key_values_( a_ , a_ , a_ , a_ )
370
1
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" if isinstance(lowercase , lowercase ): __lowercase = np.full((len(lowercase ), sequence_length, 2) , lowercase ) else: __lowercase = np.full((len(lowercase ), sequence_length) , lowercase ) for i, tensor in enumerate(lowercase ): if padding_side == "right": if isinstance(lowercase , lowercase ): __lowercase = tensor[:sequence_length] else: __lowercase = tensor[:sequence_length] else: if isinstance(lowercase , lowercase ): __lowercase = tensor[:sequence_length] else: __lowercase = tensor[:sequence_length] return out_tensor.tolist() def UpperCAmelCase ( lowercase ): """simple docstring""" __lowercase = ord(lowercase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __lowercase = unicodedata.category(lowercase ) if cat.startswith('''P''' ): return True return False @dataclass class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" __a : PreTrainedTokenizerBase __a : Union[bool, str, PaddingStrategy] = True __a : Optional[int] = None __a : Optional[int] = None __a : int = -100 __a : str = "pt" def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' import torch __lowercase = '''label''' if '''label''' in features[0].keys() else '''labels''' __lowercase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __lowercase = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __lowercase = torch.tensor(batch['''entity_ids'''] ).shape[1] __lowercase = self.tokenizer.padding_side if padding_side == "right": __lowercase = [ list(lowerCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) for label in labels ] else: __lowercase = [ [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) + list(lowerCAmelCase__ ) for label in labels ] __lowercase = [feature['''ner_tags'''] for feature in features] __lowercase = padding_tensor(lowerCAmelCase__ , -1 , lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = [feature['''original_entity_spans'''] for feature in features] __lowercase = padding_tensor(lowerCAmelCase__ , (-1, -1) , lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = {k: torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
534
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ): """simple docstring""" @register_to_config def __init__( self , *, lowerCAmelCase__ = 4 , lowerCAmelCase__ = 7_68 , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Dict: '''simple docstring''' super().__init__() __lowercase = nn.Parameter(torch.zeros(lowerCAmelCase__ ) ) # parameters for additional clip time embeddings __lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ ) # parameters for encoder hidden states __lowercase = clip_extra_context_tokens __lowercase = nn.Linear( lowerCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim ) __lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = nn.LayerNorm(lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self , *, lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings __lowercase = image_embeddings.shape[0] __lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) __lowercase = classifier_free_guidance_embeddings.expand( lowerCAmelCase__ , -1 ) __lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] __lowercase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... __lowercase = self.embedding_proj(lowerCAmelCase__ ) __lowercase = self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase__ ) __lowercase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" __lowercase = self.clip_extra_context_tokens_proj(lowerCAmelCase__ ) __lowercase = clip_extra_context_tokens.reshape(lowerCAmelCase__ , -1 , self.clip_extra_context_tokens ) __lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 ) __lowercase = self.encoder_hidden_states_proj(lowerCAmelCase__ ) __lowercase = self.text_encoder_hidden_states_norm(lowerCAmelCase__ ) __lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
534
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A: Optional[int] = logging.get_logger(__name__) A: Any = { "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : str = 'lilt' def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=1024 , **_SCREAMING_SNAKE_CASE , ) -> int: '''simple docstring''' super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = vocab_size UpperCAmelCase : Any = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : int = num_attention_heads UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Optional[int] = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Dict = max_position_embeddings UpperCAmelCase : str = type_vocab_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Tuple = layer_norm_eps UpperCAmelCase : Union[str, Any] = position_embedding_type UpperCAmelCase : List[Any] = classifier_dropout UpperCAmelCase : Dict = channel_shrink_ratio UpperCAmelCase : Tuple = max_ad_position_embeddings
721
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ ): @register_to_config def __init__( self , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = 256 , _SCREAMING_SNAKE_CASE = 2000.0 , _SCREAMING_SNAKE_CASE = 768 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = 2048 , _SCREAMING_SNAKE_CASE = 0.1 , ) -> int: '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = nn.Sequential( nn.Linear(_SCREAMING_SNAKE_CASE , d_model * 4 , bias=_SCREAMING_SNAKE_CASE ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_SCREAMING_SNAKE_CASE ) , nn.SiLU() , ) UpperCAmelCase : Tuple = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = False UpperCAmelCase : Optional[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = nn.Dropout(p=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = nn.ModuleList() for lyr_num in range(_SCREAMING_SNAKE_CASE ): # FiLM conditional T5 decoder UpperCAmelCase : List[Any] = DecoderLayer(d_model=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE ) self.decoders.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = TaLayerNorm(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = nn.Dropout(p=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase : str = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. UpperCAmelCase : Union[str, Any] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) UpperCAmelCase : Any = self.conditioning_emb(_SCREAMING_SNAKE_CASE ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) UpperCAmelCase : Dict = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. UpperCAmelCase : int = torch.broadcast_to( torch.arange(_SCREAMING_SNAKE_CASE , device=decoder_input_tokens.device ) , (batch, seq_length) , ) UpperCAmelCase : List[str] = self.position_encoding(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = self.continuous_inputs_projection(_SCREAMING_SNAKE_CASE ) inputs += position_encodings UpperCAmelCase : List[str] = self.dropout(_SCREAMING_SNAKE_CASE ) # decoder: No padding present. 
UpperCAmelCase : Dict = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. UpperCAmelCase : Optional[int] = [(x, self.encoder_decoder_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )) for x, y in encodings_and_masks] # cross attend style: concat encodings UpperCAmelCase : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) UpperCAmelCase : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: UpperCAmelCase : List[Any] = lyr( _SCREAMING_SNAKE_CASE , conditioning_emb=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )[0] UpperCAmelCase : List[Any] = self.decoder_norm(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = self.post_dropout(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = self.spec_out(_SCREAMING_SNAKE_CASE ) return spec_out class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-6 ) -> List[str]: '''simple docstring''' super().__init__() UpperCAmelCase : List[Any] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE , layer_norm_epsilon=_SCREAMING_SNAKE_CASE , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE , layer_norm_epsilon=_SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any = self.layer[0]( _SCREAMING_SNAKE_CASE , conditioning_emb=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , ) if encoder_hidden_states is not None: UpperCAmelCase : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) UpperCAmelCase : List[Any] = self.layer[1]( _SCREAMING_SNAKE_CASE , key_value_states=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , ) # Apply Film Conditional Feed Forward layer UpperCAmelCase : Dict = self.layer[-1](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return (hidden_states,) class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' super().__init__() UpperCAmelCase : Union[str, Any] = TaLayerNorm(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = Attention(query_dim=_SCREAMING_SNAKE_CASE , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , out_bias=_SCREAMING_SNAKE_CASE , scale_qk=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = nn.Dropout(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = self.layer_norm(_SCREAMING_SNAKE_CASE ) if conditioning_emb is not None: UpperCAmelCase : str = self.FiLMLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Self-attention block UpperCAmelCase : List[Any] = self.attention(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = hidden_states + self.dropout(_SCREAMING_SNAKE_CASE ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' super().__init__() UpperCAmelCase : List[str] = Attention(query_dim=_SCREAMING_SNAKE_CASE , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , out_bias=_SCREAMING_SNAKE_CASE , scale_qk=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = TaLayerNorm(_SCREAMING_SNAKE_CASE , eps=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = nn.Dropout(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Tuple = self.layer_norm(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = self.attention( _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , attention_mask=attention_mask.squeeze(1 ) , ) UpperCAmelCase : Dict = hidden_states + self.dropout(_SCREAMING_SNAKE_CASE ) return layer_output class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' super().__init__() UpperCAmelCase : int = TaDenseGatedActDense(d_model=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 , out_features=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = TaLayerNorm(_SCREAMING_SNAKE_CASE , eps=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = nn.Dropout(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] = self.layer_norm(_SCREAMING_SNAKE_CASE ) if conditioning_emb is not None: UpperCAmelCase : Union[str, Any] = self.film(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = self.DenseReluDense(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = hidden_states + self.dropout(_SCREAMING_SNAKE_CASE ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' super().__init__() UpperCAmelCase : Any = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = nn.Dropout(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = NewGELUActivation() def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.act(self.wi_a(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Optional[Any] = self.wi_a(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : 
Union[str, Any] = hidden_gelu * hidden_linear UpperCAmelCase : int = self.dropout(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = self.wo(_SCREAMING_SNAKE_CASE ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-6 ) -> List[str]: '''simple docstring''' super().__init__() UpperCAmelCase : List[str] = nn.Parameter(torch.ones(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Optional[Any] = eps def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: UpperCAmelCase : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> torch.Tensor: '''simple docstring''' return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_SCREAMING_SNAKE_CASE , 3.0 )) )) class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' super().__init__() UpperCAmelCase : Union[str, Any] = nn.Linear(_SCREAMING_SNAKE_CASE , out_features * 2 , bias=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : int = self.scale_bias(_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase : Optional[int] = torch.chunk(_SCREAMING_SNAKE_CASE , 2 , -1 ) UpperCAmelCase : Optional[int] = x * (1 + scale) + shift return x
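# The TaFiLMLayer above implements feature-wise linear modulation (FiLM): a
# conditioning embedding is projected to per-channel (scale, shift) pairs and
# applied as x * (1 + scale) + shift. A self-contained sketch of that idea:
import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, cond_dim: int, features: int) -> None:
        super().__init__()
        # Project the conditioning vector to a scale and a shift per channel.
        self.proj = nn.Linear(cond_dim, features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.proj(cond), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLM(cond_dim=32, features=64)
x = torch.randn(2, 10, 64)    # (batch, seq, features)
cond = torch.randn(2, 1, 32)  # broadcast over the sequence axis
assert film(x, cond).shape == x.shape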
359
0
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
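# Quick sanity check for the helpers above: one Koch iteration replaces a
# single segment with four, i.e. two endpoints become five vectors.
segment = [numpy.array([0, 0]), numpy.array([1, 0])]
assert len(iterate(segment, 1)) == 5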
407
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase__ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase__ : List[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : List[str] = min_resolution UpperCAmelCase__ : Optional[Any] = max_resolution UpperCAmelCase__ : List[str] = do_resize UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : int = image_mean UpperCAmelCase__ : Dict = image_std UpperCAmelCase__ : Any = do_rescale UpperCAmelCase__ : str = rescale_factor UpperCAmelCase__ : List[str] = do_pad def snake_case__ ( self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False): if not batched: UpperCAmelCase__ : List[Any] = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : List[Any] = int(self.size["""shortest_edge"""] * h / w) UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h) else: UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] UpperCAmelCase__ : int = self.size["""shortest_edge"""] else: UpperCAmelCase__ : str = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) UpperCAmelCase__ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[0])[0] UpperCAmelCase__ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( a__ , unittest.TestCase ): lowerCAmelCase :Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = DeformableDetrImageProcessingTester(self) @property def snake_case__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_lowerCamelCase , 
"""image_mean""")) self.assertTrue(hasattr(_lowerCamelCase , """image_std""")) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""")) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""")) self.assertTrue(hasattr(_lowerCamelCase , """do_rescale""")) self.assertTrue(hasattr(_lowerCamelCase , """do_pad""")) self.assertTrue(hasattr(_lowerCamelCase , """size""")) def snake_case__ ( self): UpperCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84}) self.assertEqual(image_processor.do_pad , _lowerCamelCase) def snake_case__ ( self): pass def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image) # Test not batched input UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) UpperCAmelCase__ : Optional[int] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self): # Initialize image_processing UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors UpperCAmelCase__ : Tuple = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor) # Test not batched input UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : int = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self): # prepare image and target UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f: UpperCAmelCase__ : Dict = json.loads(f.read()) UpperCAmelCase__ : int = {"""image_id""": 3_9769, """annotations""": target} # encode them UpperCAmelCase__ : Dict = DeformableDetrImageProcessor() UpperCAmelCase__ : int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : Tuple = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : Union[str, Any] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : Any = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : List[Any] = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase)) @slow def snake_case__ ( self): # prepare image, target and masks_path UpperCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f: 
UpperCAmelCase__ : Optional[int] = json.loads(f.read()) UpperCAmelCase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} UpperCAmelCase__ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""") # encode them UpperCAmelCase__ : List[str] = DeformableDetrImageProcessor(format="""coco_panoptic""") UpperCAmelCase__ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""") # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4)) # verify area UpperCAmelCase__ : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase)) # verify boxes UpperCAmelCase__ : List[str] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase) UpperCAmelCase__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3)) # verify image_id UpperCAmelCase__ : Tuple = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase)) # verify is_crowd UpperCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase)) # verify class_labels UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase)) # verify masks UpperCAmelCase__ : Dict = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase) # verify orig_size UpperCAmelCase__ : Any = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase)) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
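# A minimal usage sketch of the processor exercised above; the COCO fixture
# paths mirror the slow tests and stand in for any image/annotation pair.
import json

from PIL import Image

from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
# pixel_values is resized/padded/normalized; labels carries boxes, areas, class ids.
print(encoding["pixel_values"].shape, encoding["labels"][0]["boxes"].shape)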
407
1
"""simple docstring""" import functools def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' if not isinstance(a_ , a_ ) or not all(isinstance(a_ , a_ ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(a_ ) != 3 or not all(isinstance(a_ , a_ ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(a_ ) == 0: return 0 if min(a_ ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(a_ ) >= 366: raise ValueError('''All days elements should be less than 366''' ) __lowerCamelCase : Any =set(a_ ) @functools.cache def dynamic_programming(SCREAMING_SNAKE_CASE : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
705
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCamelCase = logging.get_logger(__name__) # General docstring _UpperCamelCase = 'RegNetConfig' # Base docstring _UpperCamelCase = 'facebook/regnet-y-040' _UpperCamelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCamelCase = 'facebook/regnet-y-040' _UpperCamelCase = 'tabby, tabby cat' _UpperCamelCase = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :Dict , __lowercase :int , __lowercase :int = 3 , __lowercase :int = 1 , __lowercase :int = 1 , __lowercase :Optional[str] = "relu" , **__lowercase :int , ): super().__init__(**__lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __lowerCamelCase : int =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __lowerCamelCase : str =tf.keras.layers.ConvaD( filters=__lowercase , kernel_size=__lowercase , strides=__lowercase , padding='''VALID''' , groups=__lowercase , use_bias=__lowercase , name='''convolution''' , ) __lowerCamelCase : str =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) __lowerCamelCase : Optional[int] =ACTaFN[activation] if activation is not None else tf.identity def __lowercase ( self :Optional[int] , __lowercase :Any ): __lowerCamelCase : str =self.convolution(self.padding(__lowercase ) ) __lowerCamelCase : Optional[int] =self.normalization(__lowercase ) __lowerCamelCase : Any =self.activation(__lowercase ) return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , **__lowercase :Any ): super().__init__(**__lowercase ) __lowerCamelCase : Tuple =config.num_channels __lowerCamelCase : Union[str, Any] =TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def __lowercase ( self :int , __lowercase :List[str] ): __lowerCamelCase : int =shape_list(__lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __lowerCamelCase : Union[str, Any] =tf.transpose(__lowercase , perm=(0, 2, 3, 1) ) __lowerCamelCase : Optional[int] =self.embedder(__lowercase ) return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :List[Any] , __lowercase :int , __lowercase :int = 2 , **__lowercase :Optional[int] ): super().__init__(**__lowercase ) __lowerCamelCase : int =tf.keras.layers.ConvaD( filters=__lowercase , kernel_size=1 , strides=__lowercase , use_bias=__lowercase , name='''convolution''' ) __lowerCamelCase : List[str] =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def __lowercase ( self :Optional[Any] , __lowercase :tf.Tensor , __lowercase :bool = False ): return self.normalization(self.convolution(__lowercase ) , training=__lowercase ) class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :Dict , __lowercase :int , __lowercase :int , **__lowercase :List[str] ): super().__init__(**__lowercase ) __lowerCamelCase : int =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name='''pooler''' ) __lowerCamelCase : int =[ tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowercase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def __lowercase ( self :Dict , __lowercase :Union[str, Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __lowerCamelCase : Any =self.pooler(__lowercase ) for layer_module in self.attention: __lowerCamelCase : Any =layer_module(__lowercase ) __lowerCamelCase : Dict =hidden_state * pooled return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :Optional[int] , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 1 , **__lowercase :str ): super().__init__(**__lowercase ) __lowerCamelCase : Dict =in_channels != out_channels or stride != 1 __lowerCamelCase : int =max(1 , out_channels // config.groups_width ) __lowerCamelCase : List[str] =( TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__lowerCamelCase : str =[ TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( __lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.2''' ), ] __lowerCamelCase : Optional[int] =ACTaFN[config.hidden_act] def __lowercase ( self :int , __lowercase :Optional[int] ): __lowerCamelCase : List[Any] =hidden_state for layer_module in self.layers: __lowerCamelCase : str =layer_module(__lowercase ) __lowerCamelCase : List[Any] =self.shortcut(__lowercase ) hidden_state += residual __lowerCamelCase : Optional[int] =self.activation(__lowercase ) return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 1 , **__lowercase :List[str] ): super().__init__(**__lowercase ) __lowerCamelCase : Optional[Any] =in_channels != out_channels or stride != 1 __lowerCamelCase : Optional[Any] =max(1 , out_channels // config.groups_width ) __lowerCamelCase : Dict =( TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) __lowerCamelCase : Union[str, Any] =[ TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( __lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(__lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.3''' ), ] __lowerCamelCase : Tuple =ACTaFN[config.hidden_act] def __lowercase ( self :Tuple , __lowercase :Tuple ): __lowerCamelCase : List[Any] =hidden_state for layer_module in self.layers: __lowerCamelCase : int =layer_module(__lowercase ) __lowerCamelCase : List[str] =self.shortcut(__lowercase ) hidden_state += residual __lowerCamelCase : List[str] =self.activation(__lowercase ) return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :int , __lowercase :RegNetConfig , __lowercase :int , __lowercase :int , __lowercase :int = 2 , __lowercase :int = 2 , **__lowercase :Union[str, Any] ): super().__init__(**__lowercase ) __lowerCamelCase : List[str] =TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer __lowerCamelCase : List[Any] =[ # downsampling is done in the first layer with stride of 2 layer(__lowercase , __lowercase , __lowercase , stride=__lowercase , name='''layers.0''' ), *[layer(__lowercase , __lowercase , __lowercase , name=f'layers.{i+1}' ) for i in range(depth - 1 )], ] def __lowercase ( self :int , __lowercase :List[str] ): for layer_module in self.layers: __lowerCamelCase : int =layer_module(__lowercase ) return hidden_state class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self :List[Any] , __lowercase :RegNetConfig , **__lowercase :List[str] ): super().__init__(**__lowercase ) __lowerCamelCase : Optional[int] =[] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) __lowerCamelCase : Any =zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowercase , __lowercase , __lowercase , depth=__lowercase , name=f'stages.{i+1}' ) ) def __lowercase ( self :str , __lowercase :tf.Tensor , __lowercase :bool = False , __lowercase :bool = True ): __lowerCamelCase : Optional[Any] =() if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowerCamelCase : Dict =hidden_states + (hidden_state,) __lowerCamelCase : List[Any] =stage_module(__lowercase ) if output_hidden_states: __lowerCamelCase : Union[str, Any] =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase , hidden_states=__lowercase ) @keras_serializable class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ): """simple docstring""" __snake_case : Optional[int] = RegNetConfig def __init__( self :List[Any] , __lowercase :Dict , **__lowercase :Union[str, Any] ): super().__init__(**__lowercase ) __lowerCamelCase : int =config __lowerCamelCase : List[str] =TFRegNetEmbeddings(__lowercase , name='''embedder''' ) __lowerCamelCase : List[str] =TFRegNetEncoder(__lowercase , name='''encoder''' ) __lowerCamelCase : List[Any] =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase , name='''pooler''' ) @unpack_inputs def __lowercase ( self :List[Any] , __lowercase :tf.Tensor , __lowercase :Optional[bool] = None , __lowercase :Optional[bool] = None , __lowercase :bool = False , ): __lowerCamelCase : Union[str, Any] =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Tuple =return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Tuple =self.embedder(__lowercase , training=__lowercase ) __lowerCamelCase : Optional[Any] =self.encoder( __lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase ) __lowerCamelCase : str =encoder_outputs[0] __lowerCamelCase : Tuple =self.pooler(__lowercase ) # Change to NCHW output format have uniformity in the modules __lowerCamelCase : int =tf.transpose(__lowercase , perm=(0, 3, 1, 2) ) __lowerCamelCase : Any =tf.transpose(__lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __lowerCamelCase : str =tuple([tf.transpose(__lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class SCREAMING_SNAKE_CASE_ ( snake_case__ ): """simple docstring""" __snake_case : Optional[int] = RegNetConfig __snake_case : int = """regnet""" __snake_case : int = """pixel_values""" @property def __lowercase ( self :List[str] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _UpperCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. 
Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' _UpperCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , snake_case__ , ) class SCREAMING_SNAKE_CASE_ ( snake_case__ ): """simple docstring""" def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , *__lowercase :List[str] , **__lowercase :int ): super().__init__(__lowercase , *__lowercase , **__lowercase ) __lowerCamelCase : Tuple =TFRegNetMainLayer(__lowercase , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowercase ( self :Optional[Any] , __lowercase :tf.Tensor , __lowercase :Optional[bool] = None , __lowercase :Optional[bool] = None , __lowercase :Optional[int]=False , ): __lowerCamelCase : List[Any] =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Dict =self.regnet( pixel_values=__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , snake_case__ , ) class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ): """simple docstring""" def __init__( self :Union[str, Any] , __lowercase :RegNetConfig , *__lowercase :List[Any] , **__lowercase :Dict ): super().__init__(__lowercase , *__lowercase , **__lowercase ) __lowerCamelCase : Optional[int] =config.num_labels __lowerCamelCase : Optional[int] =TFRegNetMainLayer(__lowercase , name='''regnet''' ) # classification head __lowerCamelCase : Union[str, Any] =[ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowercase ( self :List[Any] , __lowercase :tf.Tensor = None , __lowercase :tf.Tensor = None , __lowercase :bool = None , __lowercase :bool = None , __lowercase :int=False , ): __lowerCamelCase : str =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : str =self.regnet( __lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , training=__lowercase ) __lowerCamelCase : Any =outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase : List[str] =self.classifier[0](__lowercase ) __lowerCamelCase : str =self.classifier[1](__lowercase ) __lowerCamelCase : str =None if labels is None else self.hf_compute_loss(labels=__lowercase , logits=__lowercase ) if not return_dict: __lowerCamelCase : Optional[int] =(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states )
363
0
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
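# A small hedged extension of the check above: name each visible device.
import torch

for idx in range(torch.cuda.device_count()):
    print(idx, torch.cuda.get_device_name(idx))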
47
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
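# The _LazyModule wiring above means the heavy torch/vision imports only happen
# on first attribute access; a hedged usage sketch (the Hub checkpoint name is
# an example, not part of this module).
from transformers import BridgeTowerModel, BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")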
47
1
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __lowerCamelCase = logging.getLogger(__name__) class snake_case_ (lowercase__ ): """simple docstring""" _lowerCamelCase = """token-classification""" def __init__( self ,lowercase): """simple docstring""" if type(lowercase) == dict: UpperCAmelCase_ : List[str] = Namespace(**lowercase) UpperCAmelCase_ : List[Any] = import_module("tasks") try: UpperCAmelCase_ : Dict = getattr(lowercase ,hparams.task_type) UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""") UpperCAmelCase_ : List[Any] = self.token_classification_task.get_labels(hparams.labels) UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index super().__init__(lowercase ,len(self.labels) ,self.mode) def A_ ( self ,**lowercase): """simple docstring""" return self.model(**lowercase) def A_ ( self ,lowercase ,lowercase): """simple docstring""" UpperCAmelCase_ : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": UpperCAmelCase_ : Optional[Any] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCAmelCase_ : Optional[int] = self(**lowercase) UpperCAmelCase_ : Dict = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def A_ ( self): """simple docstring""" UpperCAmelCase_ : Tuple = self.hparams for mode in ["train", "dev", "test"]: UpperCAmelCase_ : Tuple = self._feature_file(lowercase) if os.path.exists(lowercase) and not args.overwrite_cache: logger.info("Loading features from cached file %s" ,lowercase) UpperCAmelCase_ : str = torch.load(lowercase) else: logger.info("Creating features from dataset file at %s" ,args.data_dir) UpperCAmelCase_ : Tuple = self.token_classification_task.read_examples_from_file(args.data_dir ,lowercase) UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features( lowercase ,self.labels ,args.max_seq_length ,self.tokenizer ,cls_token_at_end=bool(self.config.model_type in ["xlnet"]) ,cls_token=self.tokenizer.cls_token ,cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 ,sep_token=self.tokenizer.sep_token ,sep_token_extra=lowercase ,pad_on_left=bool(self.config.model_type in ["xlnet"]) ,pad_token=self.tokenizer.pad_token_id ,pad_token_segment_id=self.tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info("Saving features into cached file %s" ,lowercase) torch.save(lowercase ,lowercase) def A_ ( self ,lowercase ,lowercase ,lowercase = False): """simple docstring""" UpperCAmelCase_ : Dict = self._feature_file(lowercase) logger.info("Loading features from cached file %s" ,lowercase) UpperCAmelCase_ : Union[str, Any] = torch.load(lowercase) UpperCAmelCase_ : List[Any] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long) UpperCAmelCase_ : Any = 
torch.tensor([f.attention_mask for f in features] ,dtype=torch.long) if features[0].token_type_ids is not None: UpperCAmelCase_ : List[Any] = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long) else: UpperCAmelCase_ : Union[str, Any] = torch.tensor([0 for f in features] ,dtype=torch.long) # HACK(we will not use this anymore soon) UpperCAmelCase_ : Tuple = torch.tensor([f.label_ids for f in features] ,dtype=torch.long) return DataLoader( TensorDataset(lowercase ,lowercase ,lowercase ,lowercase) ,batch_size=lowercase) def A_ ( self ,lowercase ,lowercase): """simple docstring""" """Compute validation""" "" UpperCAmelCase_ : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": UpperCAmelCase_ : Optional[Any] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids UpperCAmelCase_ : Optional[int] = self(**lowercase) UpperCAmelCase_ : Dict = outputs[:2] UpperCAmelCase_ : List[str] = logits.detach().cpu().numpy() UpperCAmelCase_ : Dict = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A_ ( self ,lowercase): """simple docstring""" UpperCAmelCase_ : Tuple = torch.stack([x["val_loss"] for x in outputs]).mean() UpperCAmelCase_ : Dict = np.concatenate([x["pred"] for x in outputs] ,axis=0) UpperCAmelCase_ : Optional[Any] = np.argmax(lowercase ,axis=2) UpperCAmelCase_ : List[str] = np.concatenate([x["target"] for x in outputs] ,axis=0) UpperCAmelCase_ : Optional[int] = dict(enumerate(self.labels)) UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0])] UpperCAmelCase_ : Any = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) UpperCAmelCase_ : List[Any] = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(lowercase ,lowercase), "precision": precision_score(lowercase ,lowercase), "recall": recall_score(lowercase ,lowercase), "f1": fa_score(lowercase ,lowercase), } UpperCAmelCase_ : int = dict(results.items()) UpperCAmelCase_ : Optional[Any] = results return ret, preds_list, out_label_list def A_ ( self ,lowercase): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self._eval_end(lowercase) UpperCAmelCase_ : int = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A_ ( self ,lowercase): """simple docstring""" UpperCAmelCase_ : Optional[int] = self._eval_end(lowercase) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 UpperCAmelCase_ : int = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A_ ( lowercase ,lowercase): """simple docstring""" BaseTransformer.add_model_specific_args(lowercase ,lowercase) parser.add_argument( "--task_type" ,default="NER" ,type=lowercase ,help="Task type to fine tune in training (e.g. NER, POS, etc)") parser.add_argument( "--max_seq_length" ,default=128 ,type=lowercase ,help=( "The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded." ) ,) parser.add_argument( "--labels" ,default="" ,type=lowercase ,help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." ,) parser.add_argument( "--gpus" ,default=0 ,type=lowercase ,help="The number of GPUs allocated for this, it is by default 0 meaning none" ,) parser.add_argument( "--overwrite_cache" ,action="store_true" ,help="Overwrite the cached training and evaluation sets") return parser if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __lowerCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd()) __lowerCamelCase = parser.parse_args() __lowerCamelCase = NERTransformer(args) __lowerCamelCase = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) __lowerCamelCase = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
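# Hedged command-line sketch for the Lightning NER script above (assuming it is
# saved as run_ner.py); the data layout, model name and output paths are
# assumptions, and only --task_type, --max_seq_length and --labels are defined
# in this file, the rest come from the shared lightning_base argument helpers.
#
#   python run_ner.py \
#       --data_dir ./data/conll2003 \
#       --labels ./data/conll2003/labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --task_type NER \
#       --do_train --do_predict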
701
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
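# NAND is functionally complete; a small sketch deriving NOT and AND from the
# gate above.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0 and not_gate(0) == 1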
455
0
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = {"""vocab_file""": """vocab.txt"""} snake_case__ : int = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } snake_case__ : int = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def _snake_case (__lowercase): UpperCamelCase_ = collections.OrderedDict() with open(__lowercase , 'r' , encoding='utf-8') as reader: UpperCamelCase_ = reader.readlines() for index, token in enumerate(__lowercase): UpperCamelCase_ = token.rstrip('\n') UpperCamelCase_ = index return vocab class _a ( UpperCAmelCase__ ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase=200 ) -> Any: UpperCamelCase_ = vocab UpperCamelCase_ = unk_token UpperCamelCase_ = max_input_chars_per_word def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = list(_UpperCAmelCase ) if len(_UpperCAmelCase ) > self.max_input_chars_per_word: return [self.unk_token] UpperCamelCase_ = 0 UpperCamelCase_ = [] while start < len(_UpperCAmelCase ): UpperCamelCase_ = len(_UpperCAmelCase ) UpperCamelCase_ = None while start < end: UpperCamelCase_ = ''.join(chars[start:end] ) if substr in self.vocab: UpperCamelCase_ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_UpperCAmelCase ) UpperCamelCase_ = end return sub_tokens class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["""input_ids""", """attention_mask"""] A_ = False def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<d>" , _UpperCAmelCase="</d>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="</n>" , _UpperCAmelCase="</_>" , _UpperCAmelCase="left" , **_UpperCAmelCase , ) -> List[Any]: requires_backends(self , ['jieba'] ) super().__init__( bod_token=_UpperCAmelCase , eod_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , line_token=_UpperCAmelCase , space_token=_UpperCAmelCase , padding_side=_UpperCAmelCase , **_UpperCAmelCase , ) UpperCamelCase_ = bod_token UpperCamelCase_ = eod_token UpperCamelCase_ = load_vocab(_UpperCAmelCase ) UpperCamelCase_ = self.encoder[space_token] UpperCamelCase_ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _UpperCAmelCase : x[1] ) ) UpperCamelCase_ = {v: k for k, v in self.encoder.items()} UpperCamelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: return self.encoder[self.bod_token] @property def _UpperCAmelCase ( self ) -> List[Any]: return self.encoder[self.eod_token] @property def _UpperCAmelCase ( self ) -> Any: return self.encoder["\n"] @property def _UpperCAmelCase ( self ) -> int: return len(self.encoder ) def _UpperCAmelCase ( self ) -> str: return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[int]: 
UpperCamelCase_ = [] for x in jieba.cut(_UpperCAmelCase , cut_all=_UpperCAmelCase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_UpperCAmelCase ) ) return output_tokens def _UpperCAmelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = [i for i in token_ids if i >= 0] UpperCamelCase_ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str: return token in self.encoder def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str: return "".join(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> int: return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]: return self.decoder.get(_UpperCAmelCase , self.unk_token ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]: if os.path.isdir(_UpperCAmelCase ): UpperCamelCase_ = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: UpperCamelCase_ = (filename_prefix + '-' if filename_prefix else '') + save_directory UpperCamelCase_ = 0 if " " in self.encoder: UpperCamelCase_ = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: UpperCamelCase_ = self.encoder['\n'] del self.encoder["\n"] UpperCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _UpperCAmelCase : x[1] ) ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) UpperCamelCase_ = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) return [1] + ([0] * len(_UpperCAmelCase ))
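# Hedged usage sketch for the CPM-Ant tokenizer above; it needs the `jieba`
# extra installed and downloads the vocab of the checkpoint named in this file.
from transformers import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
ids = tokenizer("今天天气真好!")["input_ids"]
print(tokenizer.decode(ids))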
23
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[Any] ): '''simple docstring''' _snake_case : Any = [] def UpperCamelCase_ ( self : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int , **UpperCamelCase : Any ): '''simple docstring''' self.events.append('on_init_end' ) def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' self.events.append('on_train_begin' ) def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , **UpperCamelCase : Any ): '''simple docstring''' self.events.append('on_train_end' ) def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , **UpperCamelCase : str ): '''simple docstring''' self.events.append('on_epoch_begin' ) def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.events.append('on_epoch_end' ) def UpperCamelCase_ ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.events.append('on_step_begin' ) def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' self.events.append('on_step_end' ) def UpperCamelCase_ ( self : str , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' self.events.append('on_evaluate' ) def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : str , **UpperCamelCase : List[Any] ): '''simple docstring''' self.events.append('on_predict' ) def UpperCamelCase_ ( self : int , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : int , **UpperCamelCase : Optional[int] ): '''simple docstring''' self.events.append('on_save' ) def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' self.events.append('on_log' ) def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : str , **UpperCamelCase : Dict ): '''simple docstring''' self.events.append('on_prediction_step' ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Dict ): '''simple docstring''' _snake_case : str = tempfile.mkdtemp() def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' shutil.rmtree(self.output_dir ) def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Tuple=0 , UpperCamelCase 
: str=0 , UpperCamelCase : str=64 , UpperCamelCase : str=64 , UpperCamelCase : List[Any]=None , UpperCamelCase : Union[str, Any]=False , **UpperCamelCase : List[str] ): '''simple docstring''' _snake_case : Tuple = RegressionDataset(length=UpperCamelCase ) _snake_case : int = RegressionDataset(length=UpperCamelCase ) _snake_case : Tuple = RegressionModelConfig(a=UpperCamelCase , b=UpperCamelCase ) _snake_case : Union[str, Any] = RegressionPreTrainedModel(UpperCamelCase ) _snake_case : Optional[Any] = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase , report_to=[] , **UpperCamelCase ) return Trainer( UpperCamelCase , UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , callbacks=UpperCamelCase , ) def UpperCamelCase_ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): '''simple docstring''' self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) # Order doesn't matter _snake_case : Optional[Any] = sorted(UpperCamelCase , key=lambda UpperCamelCase : cb.__name__ if isinstance(UpperCamelCase , UpperCamelCase ) else cb.__class__.__name__ ) _snake_case : Dict = sorted(UpperCamelCase , key=lambda UpperCamelCase : cb.__name__ if isinstance(UpperCamelCase , UpperCamelCase ) else cb.__class__.__name__ ) for cba, cba in zip(UpperCamelCase , UpperCamelCase ): if isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ): self.assertEqual(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ): self.assertEqual(UpperCamelCase , cba.__class__ ) elif not isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ): self.assertEqual(cba.__class__ , UpperCamelCase ) else: self.assertEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase_ ( self : Dict , UpperCamelCase : Dict ): '''simple docstring''' _snake_case : Dict = ['on_init_end', 'on_train_begin'] _snake_case : List[str] = 0 _snake_case : str = len(trainer.get_eval_dataloader() ) _snake_case : str = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(UpperCamelCase ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def UpperCamelCase_ ( self : str ): '''simple docstring''' _snake_case : int = self.get_trainer() _snake_case : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) # Callbacks passed at init are added to the default callbacks _snake_case : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _snake_case : Tuple = 
self.get_trainer(disable_tqdm=UpperCamelCase ) _snake_case : Optional[int] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _snake_case : Tuple = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _snake_case : List[Any] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(UpperCamelCase ) expected_callbacks.remove(UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) _snake_case : Optional[Any] = self.get_trainer() _snake_case : Dict = trainer.pop_callback(UpperCamelCase ) self.assertEqual(cb.__class__ , UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) trainer.add_callback(UpperCamelCase ) expected_callbacks.insert(0 , UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) # We can also add, pop, or remove by instance _snake_case : Optional[Any] = self.get_trainer() _snake_case : Optional[int] = trainer.callback_handler.callbacks[0] trainer.remove_callback(UpperCamelCase ) expected_callbacks.remove(UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) _snake_case : int = self.get_trainer() _snake_case : int = trainer.callback_handler.callbacks[0] _snake_case : int = trainer.pop_callback(UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) trainer.add_callback(UpperCamelCase ) expected_callbacks.insert(0 , UpperCamelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=UpperCamelCase ) _snake_case : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _snake_case : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) # Independent log/save/eval _snake_case : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _snake_case : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) _snake_case : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _snake_case : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) _snake_case : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() _snake_case : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) _snake_case : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() _snake_case : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) # A bit of everything _snake_case : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , 
evaluation_strategy='steps' , ) trainer.train() _snake_case : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase , self.get_expected_events(UpperCamelCase ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: _snake_case : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(UpperCamelCase ) in warn_mock.call_args[0][0]
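# A minimal custom callback in the spirit of the recorder tested above; the
# Trainer wiring (model, args, dataset) is omitted and assumed to exist.
from transformers import TrainerCallback


class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")


# trainer = Trainer(model, training_args, train_dataset=train_ds,
#                   callbacks=[LossPrinterCallback()])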
411
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) class _a ( lowerCamelCase_ ): """simple docstring""" __SCREAMING_SNAKE_CASE = ['pixel_values'] def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PIL.Image.BICUBIC , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 / 255 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): super().__init__(**lowerCAmelCase_ ) _lowercase =size if size is not None else {"height": 256, "width": 256} _lowercase =get_size_dict(lowerCAmelCase_ ) _lowercase =crop_size if crop_size is not None else {"height": 224, "width": 224} _lowercase =get_size_dict(lowerCAmelCase_ , param_name="crop_size" ) _lowercase =do_resize _lowercase =size _lowercase =resample _lowercase =do_center_crop _lowercase =crop_size _lowercase =do_rescale _lowercase =rescale_factor _lowercase =do_normalize _lowercase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase =image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PIL.Image.BICUBIC , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): _lowercase =get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return resize( lowerCAmelCase_ , size=(size["height"], size["width"]) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): _lowercase =get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ): return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ): _lowercase =do_resize if do_resize is not None else self.do_resize _lowercase =resample if resample is not None else self.resample _lowercase =do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase =do_rescale if do_rescale is not None else self.do_rescale _lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase =do_normalize if do_normalize is not None else self.do_normalize _lowercase =image_mean if image_mean is not None else self.image_mean _lowercase =image_std if image_std is not None else self.image_std _lowercase =size if size is not None else self.size _lowercase =get_size_dict(lowerCAmelCase_ ) _lowercase =crop_size if crop_size is not None else self.crop_size _lowercase =get_size_dict(lowerCAmelCase_ , param_name="crop_size" ) _lowercase =make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowercase =[to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _lowercase =[self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _lowercase =[self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _lowercase =[self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _lowercase =[self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _lowercase =[to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _lowercase ={"pixel_values": images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
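# A hedged sketch of the resize -> center_crop -> rescale -> normalize chain the
# processor above implements, calling the same functional helpers it imports;
# exact helper signatures can differ between transformers versions.
import numpy as np

from transformers.image_transforms import center_crop, normalize, rescale, resize

img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
img = resize(img, size=(256, 256))
img = center_crop(img, size=(224, 224))
img = rescale(img, scale=1 / 255)
img = normalize(img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(img.shape)  # (224, 224, 3)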
594
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
594
1
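The image-processor code above chains four transforms in a fixed order: resize, center-crop, rescale, normalize, then a channels-first conversion. As a quick orientation, here is a minimal, self-contained sketch of that same chain using only NumPy and Pillow. The function and constant names are illustrative, not part of the transformers API; the 0.5 mean/std values match the IMAGENET_STANDARD constants the class defaults to.

import numpy as np
from PIL import Image

MEAN = np.array([0.5, 0.5, 0.5], dtype=np.float32)  # IMAGENET_STANDARD_MEAN
STD = np.array([0.5, 0.5, 0.5], dtype=np.float32)   # IMAGENET_STANDARD_STD

def preprocess(image: Image.Image) -> np.ndarray:
    # 1) resize to the default {"height": 256, "width": 256} with bicubic resampling
    image = image.resize((256, 256), resample=Image.Resampling.BICUBIC)
    arr = np.asarray(image, dtype=np.float32)
    # 2) center-crop to the default {"height": 224, "width": 224}
    top = (arr.shape[0] - 224) // 2
    left = (arr.shape[1] - 224) // 2
    arr = arr[top : top + 224, left : left + 224]
    # 3) rescale uint8 pixel values by 1/255 into [0, 1]
    arr = arr / 255.0
    # 4) normalize per channel, then move channels first (ChannelDimension.FIRST)
    arr = (arr - MEAN) / STD
    return arr.transpose(2, 0, 1)

pixels = preprocess(Image.new("RGB", (300, 280)))
assert pixels.shape == (3, 224, 224)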
import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowercase_ ( SCREAMING_SNAKE_CASE : ndarray ): """simple docstring""" return np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) class _lowerCAmelCase : """simple docstring""" def __init__( self , *, __SCREAMING_SNAKE_CASE = np.inf , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = 0.0 , ) -> None: """simple docstring""" snake_case__ : int =regularization snake_case__ : Tuple =gamma if kernel == "linear": snake_case__ : str =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) snake_case__ : Dict =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: snake_case__ : Optional[int] =f'''Unknown kernel: {kernel}''' raise ValueError(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float: """simple docstring""" return np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float: """simple docstring""" return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" snake_case__ : Optional[Any] =observations snake_case__ : Union[str, Any] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((snake_case__), ) : Dict =np.shape(__SCREAMING_SNAKE_CASE ) def to_minimize(__SCREAMING_SNAKE_CASE ) -> float: snake_case__ : List[str] =0 ((snake_case__), ) : int =np.shape(__SCREAMING_SNAKE_CASE ) for i in range(__SCREAMING_SNAKE_CASE ): for j in range(__SCREAMING_SNAKE_CASE ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] =LinearConstraint(__SCREAMING_SNAKE_CASE , 0 , 0 ) snake_case__ : int =Bounds(0 , self.regularization ) snake_case__ : Union[str, Any] =minimize( __SCREAMING_SNAKE_CASE , np.ones(__SCREAMING_SNAKE_CASE ) , bounds=__SCREAMING_SNAKE_CASE , constraints=[ly_contraint] ).x snake_case__ : Optional[int] =l_star # calculating mean offset of separation plane to points snake_case__ : Optional[Any] =0 for i in range(__SCREAMING_SNAKE_CASE ): for j in range(__SCREAMING_SNAKE_CASE ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) snake_case__ : int =s / n def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" snake_case__ : int =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , __SCREAMING_SNAKE_CASE ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
381
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowercase_ ( SCREAMING_SNAKE_CASE : Dataset , SCREAMING_SNAKE_CASE : Dict[str, str] ): """simple docstring""" snake_case__ : str =args.log_outputs snake_case__ : str ='''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric snake_case__ : Dict =load_metric('''wer''' ) snake_case__ : str =load_metric('''cer''' ) # compute metrics snake_case__ : int =wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) snake_case__ : Optional[int] =cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results snake_case__ : Optional[Any] =F'''WER: {wer_result}\nCER: {cer_result}''' print(SCREAMING_SNAKE_CASE ) with open(F'''{dataset_id}_eval_results.txt''' , '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case__ : Union[str, Any] =F'''log_{dataset_id}_predictions.txt''' snake_case__ : Union[str, Any] =F'''log_{dataset_id}_targets.txt''' with open(SCREAMING_SNAKE_CASE , '''w''' ) as p, open(SCREAMING_SNAKE_CASE , '''w''' ) as t: # mapping function to write output def write_to_file(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ): p.write(F'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(SCREAMING_SNAKE_CASE , with_indices=SCREAMING_SNAKE_CASE ) def lowercase_ ( SCREAMING_SNAKE_CASE : str ): """simple docstring""" snake_case__ : int ='''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case__ : str =re.sub(SCREAMING_SNAKE_CASE , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
snake_case__ : Dict =['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: snake_case__ : List[Any] =''' '''.join(text.split(SCREAMING_SNAKE_CASE ) ) return text def lowercase_ ( SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" # load dataset snake_case__ : Optional[Any] =load_dataset(args.dataset , args.config , split=args.split , use_auth_token=SCREAMING_SNAKE_CASE ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case__ : Optional[Any] =AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case__ : int =feature_extractor.sampling_rate # resample audio snake_case__ : Tuple =dataset.cast_column('''audio''' , Audio(sampling_rate=SCREAMING_SNAKE_CASE ) ) # load eval pipeline if args.device is None: snake_case__ : Dict =0 if torch.cuda.is_available() else -1 snake_case__ : Optional[Any] =pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(SCREAMING_SNAKE_CASE : str ): snake_case__ : List[Any] =asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case__ : List[Any] =prediction['''text'''] snake_case__ : int =normalize_text(batch['''sentence'''] ) return batch # run inference on all examples snake_case__ : Optional[int] =dataset.map(SCREAMING_SNAKE_CASE , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) lowerCamelCase__ = parser.parse_args() main(args)
381
1
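The long comment in the SVM code above states Wolfe's dual: maximize sum_n(l_n) - 1/2 * sum_n(sum_m(l_n * l_m * y_n * y_m * K(x_n, x_m))) subject to 0 <= l_n <= C and sum_n(l_n * y_n) = 0. Stripped of the class machinery, the same optimization reads as below; this is an illustrative restatement for a linear kernel, not the repository's code, and fit_dual_svm is a made-up name.

import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

def fit_dual_svm(x: np.ndarray, y: np.ndarray, c: float = 10.0) -> np.ndarray:
    n = len(y)
    gram = x @ x.T                       # linear kernel: K[i, j] = <x_i, x_j>
    q = (y[:, None] * y[None, :]) * gram

    def negative_dual(l: np.ndarray) -> float:
        # minimizing the negated dual is the same as maximizing the dual
        return 0.5 * l @ q @ l - l.sum()

    result = minimize(
        negative_dual,
        np.ones(n),
        bounds=Bounds(0, c),                       # 0 <= l_n <= C
        constraints=[LinearConstraint(y, 0, 0)],   # sum_n l_n * y_n = 0
    )
    return result.x

# Two separable points straddling the origin; the solver converges to ~[0.5, 0.5],
# which gives w = sum_n l_n * y_n * x_n = [1.0].
l_star = fit_dual_svm(np.array([[1.0], [-1.0]]), np.array([1.0, -1.0]))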
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging a : Any = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : str SCREAMING_SNAKE_CASE__ : str = None @staticmethod def A_ ( ): '''simple docstring''' raise NotImplementedError def A_ ( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' raise NotImplementedError def A_ ( self , snake_case ): '''simple docstring''' raise NotImplementedError def A_ ( self ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def A_ ( cls ): '''simple docstring''' return f"`pip install {cls.pip_package or cls.name}`" class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = "optuna" @staticmethod def A_ ( ): '''simple docstring''' return is_optuna_available() def A_ ( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' return run_hp_search_optuna(snake_case , snake_case , snake_case , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return default_hp_space_optuna(snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = "ray" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "'ray[tune]'" @staticmethod def A_ ( ): '''simple docstring''' return is_ray_available() def A_ ( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' return run_hp_search_ray(snake_case , snake_case , snake_case , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return default_hp_space_ray(snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = "sigopt" @staticmethod def A_ ( ): '''simple docstring''' return is_sigopt_available() def A_ ( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' return run_hp_search_sigopt(snake_case , snake_case , snake_case , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return default_hp_space_sigopt(snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "wandb" @staticmethod def A_ ( ): '''simple docstring''' return is_wandb_available() def A_ ( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' return run_hp_search_wandb(snake_case , snake_case , snake_case , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return default_hp_space_wandb(snake_case ) a : Optional[Any] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__magic_name__ ) > 0: UpperCAmelCase : List[str] = available_backends[0].name if len(__magic_name__ ) > 1: logger.info( F"{len(__magic_name__ )} hyperparameter search backends available. Using {name} as the default." 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
609
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase ( __magic_name__ ): '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(__magic_name__ ): return ext raise Exception( F"Unable to determine file format from file extension {path}. " F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) UpperCAmelCase : Any = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format UpperCAmelCase : Tuple = PipelineDataFormat.from_str( format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(__magic_name__ , __magic_name__ ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = nlp UpperCAmelCase : str = reader @staticmethod def A_ ( snake_case ): '''simple docstring''' UpperCAmelCase : str = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=snake_case , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=snake_case , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=snake_case , help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" , type=snake_case , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=snake_case , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=snake_case , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=snake_case , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=snake_case , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._nlp, [] for entry in self._reader: UpperCAmelCase : Dict = nlp(**snake_case ) if self._reader.is_multi_columns else nlp(snake_case ) if isinstance(snake_case , snake_case ): outputs.append(snake_case ) else: outputs += output # Saving data if self._nlp.binary_output: UpperCAmelCase : str = self._reader.save_binary(snake_case ) logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" ) else: self._reader.save(snake_case )
609
1
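The hyperparameter-search code above implements a small registry pattern: each backend reports its own availability, and the default is simply the first available entry, with an aggregated install hint raised when none is. The pattern is worth seeing in isolation; below is a stripped-down sketch, independent of transformers, with placeholder backend classes.

import importlib.util
from typing import Optional

class Backend:
    name: str = ""
    pip_package: Optional[str] = None

    @classmethod
    def is_available(cls) -> bool:
        # a module is "available" if it can be found on the import path
        return importlib.util.find_spec(cls.name) is not None

    @classmethod
    def pip_install(cls) -> str:
        return f"`pip install {cls.pip_package or cls.name}`"

class OptunaBackend(Backend):
    name = "optuna"

class RayTuneBackend(Backend):
    name = "ray"
    pip_package = "'ray[tune]'"

BACKENDS = [OptunaBackend, RayTuneBackend]

def default_backend_name() -> str:
    available = [b for b in BACKENDS if b.is_available()]
    if available:
        return available[0].name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(f" - To install {b.name} run {b.pip_install()}" for b in BACKENDS)
    )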
"""Join a list of strings with a separator."""


def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
533
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __snake_case : List[Any] = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 10_00, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case : Optional[int] = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 10_00, '''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case : int = { '''sample_size''': 2_56, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case : Dict = { '''num_train_timesteps''': 40, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __snake_case : Tuple = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __snake_case : str = { '''num_train_timesteps''': 1_51, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def lowerCamelCase__ ( A_ ): if isinstance(A_ , A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("boolean value expected" ) def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ): UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] if has_skip: UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def lowerCamelCase__ ( A_ , A_ 
, A_ , A_ , A_=None ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""] UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""] UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase_ = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase__ ( A_ , A_ ): UpperCAmelCase_ = torch.load(A_ , map_location="cpu" ) UpperCAmelCase_ = {} UpperCAmelCase_ = checkpoint["time_embed.0.weight"] UpperCAmelCase_ = checkpoint["time_embed.0.bias"] UpperCAmelCase_ = checkpoint["time_embed.2.weight"] UpperCAmelCase_ = checkpoint["time_embed.2.bias"] if unet_config["num_class_embeds"] is not None: UpperCAmelCase_ = checkpoint["label_emb.weight"] UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"] UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"] UpperCAmelCase_ = unet_config["down_block_types"] UpperCAmelCase_ = unet_config["layers_per_block"] UpperCAmelCase_ = unet_config["attention_head_dim"] UpperCAmelCase_ = unet_config["block_out_channels"] UpperCAmelCase_ = 1 UpperCAmelCase_ = channels_list[0] for i, layer_type in enumerate(A_ ): UpperCAmelCase_ = channels_list[i] UpperCAmelCase_ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(A_ ): UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(A_ ): UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ ) UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.1""" UpperCAmelCase_ = convert_attention( A_ , A_ , A_ , A_ , A_ ) current_layer += 1 if i != len(A_ ) - 1: UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0""" UpperCAmelCase_ = F"""input_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ ) current_layer += 1 UpperCAmelCase_ = current_channels # hardcoded the mid-block for now UpperCAmelCase_ = "mid_block.resnets.0" UpperCAmelCase_ = "middle_block.0" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ ) UpperCAmelCase_ = "mid_block.attentions.0" UpperCAmelCase_ = "middle_block.1" UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ ) UpperCAmelCase_ = "mid_block.resnets.1" UpperCAmelCase_ = "middle_block.2" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ ) UpperCAmelCase_ = 0 UpperCAmelCase_ = unet_config["up_block_types"] for i, layer_type in enumerate(A_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}""" 
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ ) current_layer += 1 if i != len(A_ ) - 1: UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1""" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase_ = F"""output_blocks.{current_layer}.0""" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ ) UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}""" UpperCAmelCase_ = F"""output_blocks.{current_layer}.1""" UpperCAmelCase_ = convert_attention( A_ , A_ , A_ , A_ , A_ ) current_layer += 1 if i != len(A_ ) - 1: UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2""" UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ ) UpperCAmelCase_ = checkpoint["out.0.weight"] UpperCAmelCase_ = checkpoint["out.0.bias"] UpperCAmelCase_ = checkpoint["out.2.weight"] UpperCAmelCase_ = checkpoint["out.2.bias"] return new_checkpoint if __name__ == "__main__": __snake_case : List[str] = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') __snake_case : List[str] = parser.parse_args() __snake_case : Any = strabool(args.class_cond) __snake_case : List[str] = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: __snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __snake_case : List[str] = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: __snake_case : Optional[Any] = None __snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config) __snake_case : str = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __snake_case : Tuple = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') __snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config) __snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
660
0
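The checkpoint-conversion script above is, at its core, one operation repeated many times: copy tensors out of a state dict while rewriting a key prefix (for example, input_blocks.N.0 becomes down_blocks.i.resnets.j). A compact sketch of that core move, with made-up prefixes rather than the real layer names:

import torch

def remap_prefix(checkpoint: dict, old_prefix: str, new_prefix: str) -> dict:
    """Copy every tensor whose key starts with ``old_prefix``, rewriting the prefix."""
    return {
        new_prefix + key[len(old_prefix):]: value
        for key, value in checkpoint.items()
        if key.startswith(old_prefix)
    }

# Illustrative keys only; the real mapping covers dozens of blocks.
ckpt = {
    "input_blocks.0.0.weight": torch.zeros(3),
    "input_blocks.0.0.bias": torch.zeros(3),
}
new_ckpt = remap_prefix(ckpt, "input_blocks.0.0", "conv_in")
assert set(new_ckpt) == {"conv_in.weight", "conv_in.bias"}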
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# a_ : List[Any] = [ # (stable-diffusion, HF Diffusers) ("time_embed.0.weight", "time_embedding.linear_1.weight"), ("time_embed.0.bias", "time_embedding.linear_1.bias"), ("time_embed.2.weight", "time_embedding.linear_2.weight"), ("time_embed.2.bias", "time_embedding.linear_2.bias"), ("input_blocks.0.0.weight", "conv_in.weight"), ("input_blocks.0.0.bias", "conv_in.bias"), ("out.0.weight", "conv_norm_out.weight"), ("out.0.bias", "conv_norm_out.bias"), ("out.2.weight", "conv_out.weight"), ("out.2.bias", "conv_out.bias"), ] a_ : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("out_layers.0", "norm2"), ("out_layers.3", "conv2"), ("emb_layers.1", "time_emb_proj"), ("skip_connection", "conv_shortcut"), ] a_ : List[Any] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks a_ : Tuple = F"""down_blocks.{i}.resnets.{j}.""" a_ : Dict = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 a_ : Dict = F"""down_blocks.{i}.attentions.{j}.""" a_ : Optional[int] = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks a_ : Any = F"""up_blocks.{i}.resnets.{j}.""" a_ : List[str] = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 a_ : List[Any] = F"""up_blocks.{i}.attentions.{j}.""" a_ : Union[str, Any] = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 a_ : int = F"""down_blocks.{i}.downsamplers.0.conv.""" a_ : str = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 a_ : Dict = F"""up_blocks.{i}.upsamplers.0.""" a_ : Optional[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) a_ : Dict = "mid_block.attentions.0." a_ : Any = "middle_block.1." 
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): a_ : Union[str, Any] = F"""mid_block.resnets.{j}.""" a_ : List[str] = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: SCREAMING_SNAKE_CASE = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: SCREAMING_SNAKE_CASE = v.replace(_UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: SCREAMING_SNAKE_CASE = v.replace(_UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = v SCREAMING_SNAKE_CASE = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# a_ : Any = [ # (stable-diffusion, HF Diffusers) ("nin_shortcut", "conv_shortcut"), ("norm_out", "conv_norm_out"), ("mid.attn_1.", "mid_block.attentions.0."), ] for i in range(4): # down_blocks have two resnets for j in range(2): a_ : str = F"""encoder.down_blocks.{i}.resnets.{j}.""" a_ : Optional[Any] = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: a_ : int = F"""down_blocks.{i}.downsamplers.0.""" a_ : Tuple = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) a_ : Optional[int] = F"""up_blocks.{i}.upsamplers.0.""" a_ : Optional[Any] = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): a_ : List[Any] = F"""decoder.up_blocks.{i}.resnets.{j}.""" a_ : Any = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): a_ : Union[str, Any] = F"""mid_block.resnets.{i}.""" a_ : List[Any] = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) a_ : int = [ # (stable-diffusion, HF Diffusers) ("norm.", "group_norm."), ("q.", "query."), ("k.", "key."), ("v.", "value."), ("proj_out.", "proj_attn."), ] def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> List[Any]: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: SCREAMING_SNAKE_CASE = v.replace(_UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: SCREAMING_SNAKE_CASE = v.replace(_UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = v SCREAMING_SNAKE_CASE = {v: vae_state_dict[k] for k, v in mapping.items()} SCREAMING_SNAKE_CASE = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) SCREAMING_SNAKE_CASE = reshape_weight_for_sd(_UpperCamelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# a_ : Optional[int] = [ # 
(stable-diffusion, HF Diffusers) ("resblocks.", "text_model.encoder.layers."), ("ln_1", "layer_norm1"), ("ln_2", "layer_norm2"), (".c_fc.", ".fc1."), (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", "transformer.text_model.final_layer_norm."), ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), ] a_ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} a_ : Any = re.compile("|".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp a_ : int = {"q": 0, "k": 1, "v": 2} def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for k, v in text_enc_dict.items(): if ( k.endswith('.self_attn.q_proj.weight' ) or k.endswith('.self_attn.k_proj.weight' ) or k.endswith('.self_attn.v_proj.weight' ) ): SCREAMING_SNAKE_CASE = k[: -len('.q_proj.weight' )] SCREAMING_SNAKE_CASE = k[-len('q_proj.weight' )] if k_pre not in capture_qkv_weight: SCREAMING_SNAKE_CASE = [None, None, None] SCREAMING_SNAKE_CASE = v continue if ( k.endswith('.self_attn.q_proj.bias' ) or k.endswith('.self_attn.k_proj.bias' ) or k.endswith('.self_attn.v_proj.bias' ) ): SCREAMING_SNAKE_CASE = k[: -len('.q_proj.bias' )] SCREAMING_SNAKE_CASE = k[-len('q_proj.bias' )] if k_pre not in capture_qkv_bias: SCREAMING_SNAKE_CASE = [None, None, None] SCREAMING_SNAKE_CASE = v continue SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda _UpperCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCamelCase ) SCREAMING_SNAKE_CASE = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda _UpperCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.cat(_UpperCamelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda _UpperCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.cat(_UpperCamelCase ) return new_state_dict def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt." ) a_ : Any = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors a_ : List[str] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors") a_ : Dict = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors") a_ : Tuple = osp.join(args.model_path, "text_encoder", "model.safetensors") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): a_ : Optional[Any] = load_file(unet_path, device="cpu") else: a_ : List[Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin") a_ : Dict = torch.load(unet_path, map_location="cpu") if osp.exists(vae_path): a_ : Tuple = load_file(vae_path, device="cpu") else: a_ : Tuple = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin") a_ : int = torch.load(vae_path, map_location="cpu") if osp.exists(text_enc_path): a_ : Tuple = load_file(text_enc_path, device="cpu") else: a_ : str = osp.join(args.model_path, "text_encoder", "pytorch_model.bin") a_ : List[str] = torch.load(text_enc_path, map_location="cpu") # Convert the UNet model a_ : str = convert_unet_state_dict(unet_state_dict) a_ : Any = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} # Convert the VAE model a_ : Dict = convert_vae_state_dict(vae_state_dict) a_ : str = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper a_ : Union[str, Any] = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm a_ : Union[str, Any] = {"transformer." + k: v for k, v in text_enc_dict.items()} a_ : Dict = convert_text_enc_state_dict_vaa(text_enc_dict) a_ : List[Any] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} else: a_ : Optional[int] = convert_text_enc_state_dict(text_enc_dict) a_ : Optional[int] = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint a_ : List[str] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: a_ : Any = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: a_ : Optional[int] = {"state_dict": state_dict} torch.save(state_dict, args.checkpoint_path)
673
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]: '''simple docstring''' with open(_UpperCamelCase , 'r' ) as fh: fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX ) try: print(*_UpperCamelCase ) finally: fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN ) a_ : int = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) a_ : str = torch.device("cuda", local_rank) a_ : Optional[int] = socket.gethostname() a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank a_ : Dict = dist.get_rank() a_ : Any = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
673
1
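One subtle step in the diffusers-to-SD conversion above is reshape_weight_for_sd, which turns a linear attention projection of shape (out, in) into a 1x1 convolution kernel of shape (out, in, 1, 1) via w.reshape(*w.shape, 1, 1). The two parameterizations compute the same thing; here is a quick numerical check of that equivalence (variable names are ad hoc):

import torch
import torch.nn.functional as F

w = torch.randn(8, 8)        # linear projection weight, shape (out, in)
x = torch.randn(1, 8, 4, 4)  # feature map with 8 channels

# a linear layer applied independently at every spatial position...
y_linear = torch.einsum("oi,bihw->bohw", w, x)
# ...matches the same weight used as a 1x1 convolution kernel
y_conv = F.conv2d(x, w.reshape(*w.shape, 1, 1))

assert torch.allclose(y_linear, y_conv, atol=1e-5)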
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[Any]: lowercase : Union[str, Any] ='''huggingface/label-files''' lowercase : Optional[int] ='''imagenet-1k-id2label.json''' lowercase : Optional[Any] =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase : List[Any] ={int(__magic_name__ ): v for k, v in idalabel.items()} lowercase : Any ={v: k for k, v in idalabel.items()} lowercase : Union[str, Any] ='''std_conv''' if '''bit''' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase : Any =BitConfig( conv_layer=__magic_name__ , num_labels=1000 , idalabel=__magic_name__ , labelaid=__magic_name__ , ) return config def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> List[str]: if "stem.conv" in name: lowercase : Tuple =name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: lowercase : Union[str, Any] =name.replace('''blocks''' , '''layers''' ) if "head.fc" in name: lowercase : Tuple =name.replace('''head.fc''' , '''classifier.1''' ) if name.startswith('''norm''' ): lowercase : Tuple ='''bit.''' + name if "bit" not in name and "classifier" not in name: lowercase : Optional[int] ='''bit.encoder.''' + name return name def _lowerCAmelCase ( ) -> Tuple: lowercase : int ='''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : Any =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[Any]=False ) -> Tuple: lowercase : Optional[Any] =get_config(__magic_name__ ) # load original model from timm lowercase : Dict =create_model(__magic_name__ , pretrained=__magic_name__ ) timm_model.eval() # load state_dict of original model lowercase : List[str] =timm_model.state_dict() for key in state_dict.copy().keys(): lowercase : Union[str, Any] =state_dict.pop(__magic_name__ ) lowercase : List[Any] =val.squeeze() if '''head''' in key else val # load HuggingFace model lowercase : str =BitForImageClassification(__magic_name__ ) model.eval() model.load_state_dict(__magic_name__ ) # create image processor lowercase : str =create_transform(**resolve_data_config({} , model=__magic_name__ ) ) lowercase : int =transform.transforms lowercase : Union[str, Any] ={ '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowercase : Optional[Any] =BitImageProcessor( do_resize=__magic_name__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__magic_name__ , 
image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase : Optional[Any] =prepare_img() lowercase : Optional[Any] =transform(__magic_name__ ).unsqueeze(0 ) lowercase : Optional[Any] =processor(__magic_name__ , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(__magic_name__ , __magic_name__ ) # verify logits with torch.no_grad(): lowercase : List[str] =model(__magic_name__ ) lowercase : Any =outputs.logits print('''Logits:''' , logits[0, :3] ) print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase : List[str] =timm_model(__magic_name__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) UpperCamelCase_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
92
from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCAmelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
651
0
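The second snippet in the record above defers all real imports through _LazyModule: the package module is replaced by an object that imports a submodule only when one of its exported names is first accessed. Below is a minimal stand-in for that mechanism; LazyModule and the json/dumps example are illustrative, not the transformers implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {module: [exported names]} into {exported name: module}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache: later lookups bypass __getattr__
        return value

# json is only imported once ``dumps`` is first touched
lazy = LazyModule("lazy_demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": 1}))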
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __lowercase = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def lowerCAmelCase (__UpperCamelCase : Any ): """simple docstring""" __UpperCamelCase ={} state_dict.pop('''pixel_mean''' , UpperCamelCase__ ) state_dict.pop('''pixel_std''' , UpperCamelCase__ ) __UpperCamelCase =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __UpperCamelCase =key.replace(UpperCamelCase__ , UpperCamelCase__ ) if re.match(UpperCamelCase__ , UpperCamelCase__ ): __UpperCamelCase =int(re.match(UpperCamelCase__ , UpperCamelCase__ ).group(2 ) ) if layer_nb == 0: __UpperCamelCase =key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: __UpperCamelCase =key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: __UpperCamelCase =key.replace('''layers.2''' , '''proj_out''' ) __UpperCamelCase =value __UpperCamelCase =model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def lowerCAmelCase (__UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : int="ybelkada/segment-anything" ): """simple docstring""" __UpperCamelCase =hf_hub_download(UpperCamelCase__ , F"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: __UpperCamelCase =SamConfig() elif "sam_vit_l" in model_name: __UpperCamelCase =SamVisionConfig( hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , ) __UpperCamelCase =SamConfig( vision_config=UpperCamelCase__ , ) elif "sam_vit_h" in model_name: __UpperCamelCase =SamVisionConfig( hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , ) __UpperCamelCase =SamConfig( vision_config=UpperCamelCase__ , ) __UpperCamelCase =torch.load(UpperCamelCase__ , map_location='''cpu''' ) __UpperCamelCase =replace_keys(UpperCamelCase__ ) __UpperCamelCase =SamImageProcessor() __UpperCamelCase =SamProcessor(image_processor=UpperCamelCase__ ) __UpperCamelCase =SamModel(UpperCamelCase__ ) hf_model.load_state_dict(UpperCamelCase__ ) __UpperCamelCase =hf_model.to('''cuda''' ) __UpperCamelCase ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' __UpperCamelCase 
=Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) __UpperCamelCase =[[[4_0_0, 6_5_0]]] __UpperCamelCase =[[1]] __UpperCamelCase =processor(images=np.array(UpperCamelCase__ ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCamelCase =hf_model(**UpperCamelCase__ ) __UpperCamelCase =output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 __UpperCamelCase =processor( images=np.array(UpperCamelCase__ ) , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCamelCase =hf_model(**UpperCamelCase__ ) __UpperCamelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 __UpperCamelCase =((7_5, 2_7_5, 1_7_2_5, 8_5_0),) __UpperCamelCase =processor(images=np.array(UpperCamelCase__ ) , input_boxes=UpperCamelCase__ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCamelCase =hf_model(**UpperCamelCase__ ) __UpperCamelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. __UpperCamelCase =[[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]] __UpperCamelCase =[[1, 1]] __UpperCamelCase =processor( images=np.array(UpperCamelCase__ ) , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __UpperCamelCase =hf_model(**UpperCamelCase__ ) __UpperCamelCase =output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": __lowercase = argparse.ArgumentParser() __lowercase = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) __lowercase = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
703
"""simple docstring""" import sys __lowercase = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def lowerCAmelCase (__UpperCamelCase : str = N ): """simple docstring""" __UpperCamelCase =-sys.maxsize - 1 for i in range(len(__UpperCamelCase ) - 1_2 ): __UpperCamelCase =1 for j in range(1_3 ): product *= int(n[i + j] ) if product > largest_product: __UpperCamelCase =product return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
296
0
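The Project Euler solution above rebuilds each thirteen-digit product from scratch, which is already fast enough for a 1000-digit input. For illustration, the same scan can be phrased as one expression with math.prod; this is an alternative formulation, not the repository's code.

from math import prod

def largest_window_product(digits: str, width: int = 13) -> int:
    return max(
        prod(int(d) for d in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )

# Tiny check: over "123456" with width 3, the best window is 4*5*6 = 120.
assert largest_window_product("123456", width=3) == 120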
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
569
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ): a = CodeGenTokenizer a = CodeGenTokenizerFast a = True a = {'''add_prefix_space''': True} a = False def _lowerCamelCase ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] A_ : Union[str, Any] = dict(zip(a__ , range(len(a__ ) ) ) ) A_ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] A_ : Tuple = {"""unk_token""": """<unk>"""} A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(a__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a__ ) ) def _lowerCamelCase ( self , **a__ ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ ) def _lowerCamelCase ( self , **a__ ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ ) def _lowerCamelCase ( self , a__ ): A_ : str = """lower newer""" A_ : Optional[int] = """lower newer""" return input_text, output_text def _lowerCamelCase ( self ): A_ : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : List[Any] = """lower newer""" A_ : int = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] A_ : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ ) self.assertListEqual(a__ , a__ ) A_ : int = tokens + [tokenizer.unk_token] A_ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def _lowerCamelCase ( self ): if not self.test_rust_tokenizer: return A_ : Optional[Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=a__ ) A_ : Dict = """lower newer""" # Testing tokenization A_ : List[str] = tokenizer.tokenize(a__ , add_prefix_space=a__ ) A_ : Dict = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids without special tokens A_ : int = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ ) A_ : Any = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids with special tokens A_ : Any = self.get_rust_tokenizer(add_prefix_space=a__ ) A_ : Dict = tokenizer.encode(a__ , add_prefix_space=a__ ) A_ : Union[str, Any] = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) # Testing the unknown token A_ : List[Any] = tokens + [rust_tokenizer.unk_token] A_ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def _lowerCamelCase ( self , *a__ , **a__ ): # It's very 
difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def _lowerCamelCase ( self , a__=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) # Simple input A_ : str = """This is a simple input""" A_ : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""] A_ : Optional[int] = ("""This is a simple input""", """This is a pair""") A_ : Optional[int] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" ) # Simple input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" ) # Simple input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , ) # Pair input self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" ) # Pair input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" ) # Pair input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , ) def _lowerCamelCase ( self ): A_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input A_ : Optional[int] = """This is a simple input""" A_ : int = ["""This is a simple input looooooooong""", """This is a simple input"""] A_ : str = ("""This is a simple input""", """This is a pair""") A_ : int = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] A_ : Optional[Any] = tokenizer.pad_token_id A_ : Optional[Any] = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) A_ : List[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" ) A_ : str = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) A_ : List[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in 
out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def _lowerCamelCase ( self ): A_ : Tuple = """$$$""" A_ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ ) A_ : Optional[int] = """This is a simple input""" A_ : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] A_ : Optional[Any] = tokenizer.bos_token_id A_ : Optional[int] = tokenizer(a__ ) A_ : Dict = tokenizer(a__ ) self.assertEqual(out_s.input_ids[0] , a__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A_ : int = tokenizer.decode(out_s.input_ids ) A_ : Any = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def _lowerCamelCase ( self ): A_ : List[str] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) A_ : List[str] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" A_ : Dict = """\nif len_a > len_b: result = a\nelse: result = b""" A_ : Any = tokenizer.encode(a__ ) A_ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] A_ : Union[str, Any] = tokenizer.decode(a__ , truncate_before_pattern=a__ ) self.assertEqual(a__ , a__ ) def _lowerCamelCase ( self ): pass
569
1
from math import pow def backtrack( needed_sum : int , power : int , current_number : int , current_sum : int , solutions_count : int , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count i_to_n = int(pow(current_number , power ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n current_sum, solutions_count = backtrack( needed_sum , power , current_number + 1 , current_sum , solutions_count ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. current_sum, solutions_count = backtrack( needed_sum , power , current_number + 1 , current_sum , solutions_count ) return current_sum, solutions_count def solve( needed_sum : int , power : int ) -> int: if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( "Invalid input\n" "needed_sum must be between 1 and 1000, power between 2 and 10." ) return backtrack( needed_sum , power , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
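# --- Added usage checks for the functions defined above ---
# e.g. 13 = 2**2 + 3**2 is the only way to write 13 as a sum of distinct squares,
# and 100 has three such representations (10**2, 6**2 + 8**2, 1+9+16+25+49).
assert solve(13, 2) == 1
assert solve(100, 2) == 3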
712
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = '▁' _UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'} _UpperCAmelCase = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model' ), } } _UpperCAmelCase = { 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off _UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _UpperCamelCase ( lowerCAmelCase_ ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] _UpperCamelCase : List[int] = [] _UpperCamelCase : List[int] = [] def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]="<s>" , _SCREAMING_SNAKE_CASE: Optional[int]="</s>" , 
_SCREAMING_SNAKE_CASE: int="</s>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<pad>" , _SCREAMING_SNAKE_CASE: int="<mask>" , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Tuple=False , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Tuple: """simple docstring""" UpperCamelCase_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs UpperCamelCase_ = legacy_behaviour super().__init__( bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) ) UpperCamelCase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase_ = 1 UpperCamelCase_ = len(self.sp_model ) UpperCamelCase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE ) } UpperCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()} UpperCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCamelCase_ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCamelCase_ = src_lang if src_lang is not None else "eng_Latn" UpperCamelCase_ = self.lang_code_to_id[self._src_lang] UpperCamelCase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self: Any ) -> Union[str, Any]: """simple docstring""" UpperCamelCase_ = self.__dict__.copy() UpperCamelCase_ = None UpperCamelCase_ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Tuple: """simple docstring""" UpperCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCamelCase_ = {} UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase ( self: Union[str, Any] ) -> Dict: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase ( self: Union[str, Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> None: """simple docstring""" UpperCamelCase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = [1] * len(self.prefix_tokens ) UpperCamelCase_ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCamelCase_ = [self.sep_token_id] UpperCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] , _SCREAMING_SNAKE_CASE: Optional[str] , **_SCREAMING_SNAKE_CASE: Tuple ) -> int: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCamelCase_ = src_lang UpperCamelCase_ = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ = tgt_lang_id return inputs def lowercase ( self: Tuple ) -> Union[str, Any]: """simple 
docstring""" UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: str ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase_ = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ = "".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , " " ).strip() return out_string def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase_ = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi: UpperCamelCase_ = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str = "eng_Latn" , _SCREAMING_SNAKE_CASE: Optional[List[str]] = None , _SCREAMING_SNAKE_CASE: str = "fra_Latn" , **_SCREAMING_SNAKE_CASE: List[str] , ) -> BatchEncoding: """simple docstring""" UpperCamelCase_ = src_lang UpperCamelCase_ = tgt_lang return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def lowercase ( self: Any ) -> Optional[int]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def lowercase ( self: Dict ) -> Optional[int]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> None: """simple docstring""" UpperCamelCase_ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: UpperCamelCase_ = [] UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase_ = [self.cur_lang_code] UpperCamelCase_ = [self.eos_token_id] def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> None: """simple docstring""" UpperCamelCase_ = self.lang_code_to_id[lang] if self.legacy_behaviour: UpperCamelCase_ = [] UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code] else: UpperCamelCase_ = [self.cur_lang_code] UpperCamelCase_ = [self.eos_token_id]
371
0
"""simple docstring""" import unittest from knapsack import knapsack as k class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> List[str]: _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = [0] _lowerCamelCase : Union[str, Any] = [0] _lowerCamelCase : Union[str, Any] = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 ) _lowerCamelCase : Optional[int] = [60] _lowerCamelCase : int = [10] _lowerCamelCase : Tuple = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 ) def a__ ( self ) -> Union[str, Any]: _lowerCamelCase : int = 3 _lowerCamelCase : Tuple = [1, 2, 3] _lowerCamelCase : str = [3, 2, 1] _lowerCamelCase : Any = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 ) def a__ ( self ) -> Optional[int]: _lowerCamelCase : Tuple = 50 _lowerCamelCase : int = [60, 100, 120] _lowerCamelCase : Any = [10, 20, 30] _lowerCamelCase : Union[str, Any] = len(_lowercase ) self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 ) if __name__ == "__main__": unittest.main()
434
"""simple docstring""" import argparse import json import subprocess def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any: _lowerCamelCase : List[str] = [] _lowerCamelCase : Optional[int] = ( F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"''' ''' https://api.github.com/repos/huggingface/transformers/actions/runners''' ) _lowerCamelCase : Optional[int] = subprocess.run(SCREAMING_SNAKE_CASE_ , shell=SCREAMING_SNAKE_CASE_ , stdout=subprocess.PIPE ) _lowerCamelCase : List[str] = output.stdout.decode('''utf-8''' ) _lowerCamelCase : Union[str, Any] = json.loads(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Dict = status['''runners'''] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(SCREAMING_SNAKE_CASE_ ) # save the result so we can report them on Slack with open('''offline_runners.txt''' , '''w''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: _lowerCamelCase : Tuple = '''\n'''.join([x['''name'''] for x in offline_runners] ) raise ValueError(F'''The following runners are offline:\n{failed}''' ) if __name__ == "__main__": def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]: return values.split(''',''' ) SCREAMING_SNAKE_CASE__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '--target_runners', default=None, type=list_str, required=True, help='Comma-separated list of runners to check status.', ) parser.add_argument( '--token', default=None, type=str, required=True, help='A token that has actions:read permission.' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =parser.parse_args() get_runner_status(args.target_runners, args.token)
434
1
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowercase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self) -> Tuple: """simple docstring""" UpperCamelCase__ : Union[str, Any] =["a", "b", "c"] # Defaults to last layer if both are None UpperCamelCase__ , UpperCamelCase__ : Any =get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , ["c"]) self.assertEqual(__SCREAMING_SNAKE_CASE , [2]) # Out indices set to match out features UpperCamelCase__ , UpperCamelCase__ : Dict =get_aligned_output_features_output_indices(["a", "c"] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , ["a", "c"]) self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2]) # Out features set to match out indices UpperCamelCase__ , UpperCamelCase__ : Optional[int] =get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [0, 2] , __SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , ["a", "c"]) self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2]) # Out features selected from negative indices UpperCamelCase__ , UpperCamelCase__ : List[str] =get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [-3, -1] , __SCREAMING_SNAKE_CASE) self.assertEqual(__SCREAMING_SNAKE_CASE , ["a", "c"]) self.assertEqual(__SCREAMING_SNAKE_CASE , [-3, -1]) def UpperCAmelCase ( self) -> Dict: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(["a", "b"] , (0, 1) , __SCREAMING_SNAKE_CASE) # Out features must be a list with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"]) # Out features must be a subset of stage names with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"]) # Out indices must be a list or tuple with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , 0 , ["a", "b"]) # Out indices must be a subset of stage names with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , (0, 1) , ["a"]) # Out features and out indices must be the same length with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"]) # Out features should match out indices with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"]) # Out features and out indices should be in order with self.assertRaises(__SCREAMING_SNAKE_CASE): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"]) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"]) def UpperCAmelCase ( self) -> Any: """simple docstring""" UpperCamelCase__ : Any =BackboneMixin() UpperCamelCase__ : Optional[int] =["a", "b", "c"] UpperCamelCase__ : List[Any] =["a", "c"] UpperCamelCase__ : Union[str, Any] =[0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly UpperCamelCase__ : Dict =["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"]) self.assertEqual(backbone.out_indices , [0, 1]) UpperCamelCase__ 
: Union[str, Any] =[-3, -1] self.assertEqual(backbone.out_features , ["a", "c"]) self.assertEqual(backbone.out_indices , [-3, -1])
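# --- Added usage sketch ---
# The alignment helper exercised above: with both out_features and out_indices
# unset, it defaults to the last stage of the backbone.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

features, indices = get_aligned_output_features_output_indices(None, None, ["a", "b", "c"])
assert features == ["c"] and indices == [2]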
582
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def parse_args(): '''simple docstring''' parser = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=str , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=REMAINDER ) return parser.parse_args() def main(): '''simple docstring''' args = parse_args() # Import training_script as a module. script_fpath = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) mod_name = script_fpath.stem mod = importlib.import_module(mod_name ) # Patch sys.argv sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
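# --- Added sketch of the argv-patching trick ---
# The launcher above rewrites sys.argv so the imported training module sees its own
# flags plus --tpu_num_cores; the script name and flags below are illustrative.
def patch_argv(training_script, script_args, num_cores):
    return [training_script] + list(script_args) + ["--tpu_num_cores", str(num_cores)]

assert patch_argv("train.py", ["--learning_rate", "3e-5"], 8)[-2:] == ["--tpu_num_cores", "8"]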
582
1
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ =OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) lowerCAmelCase__ =OrderedDict( [ 
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) lowerCAmelCase__ =OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) lowerCAmelCase__ =OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) lowerCAmelCase__ =OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) lowerCAmelCase__ =OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) lowerCAmelCase__ =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCAmelCase__ =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCAmelCase__ 
=_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCAmelCase__ =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCAmelCase__ =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModel) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ =auto_class_update(FlaxAutoModelForVisionaSeq, 
head_doc="vision-to-text modeling") class A__( _BaseAutoModelClass ): lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCAmelCase__ =auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
482
"""simple docstring""" import argparse import os import re import packaging.version lowerCAmelCase__ ="examples/" lowerCAmelCase__ ={ "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } lowerCAmelCase__ ={ "init": "src/transformers/__init__.py", "setup": "setup.py", } lowerCAmelCase__ ="README.md" def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]: with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace('''VERSION''' , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(UpperCAmelCase__ , UpperCAmelCase__ ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(UpperCAmelCase__ ) def _a ( UpperCAmelCase__ ) -> Any: for folder, directories, fnames in os.walk(UpperCAmelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , pattern='''examples''' ) def _a ( UpperCAmelCase__ , UpperCAmelCase__=False ) -> List[Any]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if not patch: update_version_in_examples(UpperCAmelCase__ ) def _a ( ) -> Dict: __SCREAMING_SNAKE_CASE = '''🤗 Transformers currently provides the following architectures''' __SCREAMING_SNAKE_CASE = '''1. Want to contribute a new model?''' with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __SCREAMING_SNAKE_CASE = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(UpperCAmelCase__ ) def _a ( ) -> Tuple: with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS['''init'''][0].search(UpperCAmelCase__ ).groups()[0] return packaging.version.parse(UpperCAmelCase__ ) def _a ( UpperCAmelCase__=False ) -> Dict: __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(f"""Which version are you releasing? [{default_version}]""" ) if len(UpperCAmelCase__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCAmelCase__ , patch=UpperCAmelCase__ ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _a ( ) -> Optional[int]: __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(UpperCAmelCase__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCAmelCase__ ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") lowerCAmelCase__ =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
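# --- Added self-contained sketch ---
# The "init" pattern/replacement pair above, applied to an in-memory string instead
# of src/transformers/__init__.py; the version numbers are illustrative. Note the
# replacement carries its own trailing newline because `\s*$` swallows the original one.
import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
replace = "__version__ = \"VERSION\"\n".replace("VERSION", "4.28.0")
source = "__version__ = \"4.28.0.dev0\"\n"
assert pattern.sub(replace, source) == "__version__ = \"4.28.0\"\n"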
482
1
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' UpperCamelCase = None UpperCamelCase = None @property def snake_case__ ( self : Tuple ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def snake_case__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(a_ , '''feature_size''' ) ) self.assertTrue(hasattr(a_ , '''sampling_rate''' ) ) self.assertTrue(hasattr(a_ , '''padding_value''' ) ) def snake_case__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common() __UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCAmelCase : Tuple = feat_extract.model_input_names[0] __UpperCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) ) __UpperCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) __UpperCAmelCase : str = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) __UpperCAmelCase : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCAmelCase : Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def snake_case__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) __UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCAmelCase : List[str] = feat_extract.model_input_names[0] __UpperCAmelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) __UpperCAmelCase : int = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCAmelCase : Optional[int] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def snake_case__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ ) __UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCAmelCase : Optional[Any] = feat_extract.model_input_names[0] __UpperCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) __UpperCAmelCase : Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __UpperCAmelCase : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def snake_case__ ( self : str , a_ : Any=False ): '''simple docstring''' def _inputs_have_equal_length(a_ : str ): __UpperCAmelCase : Optional[Any] = len(input[0] ) for input_slice in input[1:]: if len(a_ ) != length: return False return True def _inputs_are_equal(a_ : Any , a_ : Optional[int] ): if len(a_ ) != len(a_ ): return False for input_slice_a, 
input_slice_a in zip(a_ , a_ ): if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ): return False return True __UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCAmelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ ) __UpperCAmelCase : Dict = feat_extract.model_input_names[0] __UpperCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) __UpperCAmelCase : List[str] = self.feat_extract_tester.seq_length_diff __UpperCAmelCase : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff __UpperCAmelCase : Tuple = self.feat_extract_tester.min_seq_length __UpperCAmelCase : Optional[Any] = self.feat_extract_tester.batch_size __UpperCAmelCase : List[str] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy __UpperCAmelCase : int = feat_extract.pad(a_ , padding=a_ ) __UpperCAmelCase : str = input_a[input_name] __UpperCAmelCase : Union[str, Any] = feat_extract.pad(a_ , padding='''longest''' ) __UpperCAmelCase : Optional[int] = input_a[input_name] __UpperCAmelCase : Optional[int] = feat_extract.pad(a_ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) __UpperCAmelCase : Any = input_a[input_name] __UpperCAmelCase : Optional[Any] = feat_extract.pad(a_ , padding='''longest''' , return_tensors='''np''' ) __UpperCAmelCase : Dict = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(a_ ): feat_extract.pad(a_ , padding='''max_length''' )[input_name] __UpperCAmelCase : str = feat_extract.pad( a_ , padding='''max_length''' , max_length=a_ , return_tensors='''np''' ) __UpperCAmelCase : Tuple = input_a[input_name] self.assertFalse(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_are_equal(a_ , a_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __UpperCAmelCase : Any = feat_extract.pad(a_ , pad_to_multiple_of=10 ) __UpperCAmelCase : str = input_a[input_name] __UpperCAmelCase : List[str] = feat_extract.pad(a_ , padding='''longest''' , pad_to_multiple_of=10 ) __UpperCAmelCase : List[Any] = input_a[input_name] __UpperCAmelCase : Union[str, Any] = feat_extract.pad( a_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=a_ ) __UpperCAmelCase : Any = input_a[input_name] __UpperCAmelCase : Tuple = feat_extract.pad( a_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=a_ , return_tensors='''np''' , ) __UpperCAmelCase : List[str] = input_a[input_name] self.assertTrue(all(len(a_ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(a_ , a_ ) ) __UpperCAmelCase : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(a_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __UpperCAmelCase : Any = (np.ones(self.feat_extract_tester.feature_size ) * 
feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def snake_case__ ( self : Dict , a_ : int=False ): '''simple docstring''' def _inputs_have_equal_length(a_ : Dict ): __UpperCAmelCase : Any = len(input[0] ) for input_slice in input[1:]: if len(a_ ) != length: return False return True def _inputs_are_equal(a_ : int , a_ : Union[str, Any] ): if len(a_ ) != len(a_ ): return False for input_slice_a, input_slice_a in zip(a_ , a_ ): if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ): return False return True __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) __UpperCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ ) __UpperCAmelCase : int = feat_extract.model_input_names[0] __UpperCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest __UpperCAmelCase : Optional[Any] = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=a_ ) __UpperCAmelCase : Optional[Any] = input_a[input_name] __UpperCAmelCase : Optional[int] = feat_extract.pad(a_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) __UpperCAmelCase : List[str] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertFalse(_inputs_have_equal_length(a_ ) ) # truncate to smallest with np __UpperCAmelCase : Optional[Any] = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=a_ , ) __UpperCAmelCase : Optional[Any] = input_a[input_name] __UpperCAmelCase : List[Any] = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) __UpperCAmelCase : str = input_a[input_name] self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(a_ ) ) # truncate to middle __UpperCAmelCase : str = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=a_ , return_tensors='''np''' , ) __UpperCAmelCase : Optional[int] = input_a[input_name] __UpperCAmelCase : Optional[Any] = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=a_ ) __UpperCAmelCase : int = input_a[input_name] __UpperCAmelCase : List[str] = feat_extract.pad( a_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) __UpperCAmelCase : str = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) self.assertTrue(_inputs_have_equal_length(a_ ) ) 
        self.assertTrue(_inputs_are_equal(a_, a_))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(a_))
        self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(a_):
            feat_extract.pad(a_, truncation=a_)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(a_):
            feat_extract.pad(a_, padding='''longest''', truncation=a_)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(a_):
            feat_extract.pad(a_, padding='''longest''', truncation=a_)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(a_):
            feat_extract.pad(a_, padding='''max_length''', truncation=a_)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        __UpperCAmelCase : Dict = 12
        __UpperCAmelCase : int = feat_extract.pad(
            a_, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=a_, truncation=a_,
        )
        __UpperCAmelCase : Tuple = input_a[input_name]
        __UpperCAmelCase : str = feat_extract.pad(
            a_, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=a_,
        )
        __UpperCAmelCase : Optional[Any] = input_a[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        __UpperCAmelCase : Optional[int] = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            __UpperCAmelCase : List[str] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(a_))
        self.assertFalse(_inputs_have_equal_length(a_))

    def snake_case__ ( self : Optional[Any] ):
        '''simple docstring'''
        self._check_padding(numpify=a_)

    def snake_case__ ( self : Union[str, Any] ):
        '''simple docstring'''
        self._check_padding(numpify=a_)

    def snake_case__ ( self : int ):
        '''simple docstring'''
        self._check_truncation(numpify=a_)

    def snake_case__ ( self : str ):
        '''simple docstring'''
        self._check_truncation(numpify=a_)

    @require_torch
    def snake_case__ ( self : str ):
        '''simple docstring'''
        __UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
        __UpperCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
        __UpperCAmelCase : int = feat_extract.model_input_names[0]
        __UpperCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})

        __UpperCAmelCase : Any = feat_extract.pad(a_, padding='''longest''', return_tensors='''np''')[input_name]
        __UpperCAmelCase : str = feat_extract.pad(a_, padding='''longest''', return_tensors='''pt''')[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)

    @require_tf
    def snake_case__ ( self : Optional[int] ):
        '''simple docstring'''
        __UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
        __UpperCAmelCase : str = self.feat_extract_tester.prepare_inputs_for_common()
        __UpperCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
        __UpperCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs})

        __UpperCAmelCase : str = feat_extract.pad(a_, padding='''longest''', return_tensors='''np''')[input_name]
        __UpperCAmelCase : Union[str, Any] = feat_extract.pad(a_, padding='''longest''', return_tensors='''tf''')[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)

    def snake_case__ ( self : Optional[int] ):
        '''simple docstring'''
        __UpperCAmelCase : List[str] = self.feat_extract_dict
        __UpperCAmelCase : Union[str, Any] = True
        __UpperCAmelCase : Any = self.feature_extraction_class(**a_)
        __UpperCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        __UpperCAmelCase : str = [len(a_) for x in speech_inputs]
        __UpperCAmelCase : Tuple = feat_extract.model_input_names[0]
        __UpperCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})

        __UpperCAmelCase : Any = feat_extract.pad(a_, padding='''longest''', return_tensors='''np''')
        self.assertIn('''attention_mask''', a_)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), a_)

    def snake_case__ ( self : List[Any] ):
        '''simple docstring'''
        __UpperCAmelCase : Optional[int] = self.feat_extract_dict
        __UpperCAmelCase : Tuple = True
        __UpperCAmelCase : int = self.feature_extraction_class(**a_)
        __UpperCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        __UpperCAmelCase : Any = [len(a_) for x in speech_inputs]
        __UpperCAmelCase : List[str] = feat_extract.model_input_names[0]
        __UpperCAmelCase : int = BatchFeature({input_name: speech_inputs})
        __UpperCAmelCase : List[Any] = min(a_)

        __UpperCAmelCase : str = feat_extract.pad(
            a_, padding='''max_length''', max_length=a_, truncation=a_, return_tensors='''np'''
        )
        self.assertIn('''attention_mask''', a_)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
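# A minimal standalone sketch of the `pad_to_multiple_of` rounding rule that the padding
# assertions above encode: the target length is rounded up to the next multiple. The helper
# name is hypothetical and not part of the test suite.
def round_up_to_multiple(length: int, multiple: int) -> int:
    """Round `length` up to the nearest multiple of `multiple` (the formula used in the tests)."""
    return length if length % multiple == 0 else (length // multiple + 1) * multiple


assert round_up_to_multiple(1200, 10) == 1200
assert round_up_to_multiple(1203, 10) == 1210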
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def snake_case__ ( self : Tuple ):
        '''simple docstring'''
        __UpperCAmelCase : Tuple = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')

        __UpperCAmelCase : Dict = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]],
            dtype=tf.intaa,
        )  # J'aime le camembert !"
        __UpperCAmelCase : int = model(a_)['''last_hidden_state''']
        __UpperCAmelCase : Optional[int] = tf.TensorShape((1, 10, 7_68))
        self.assertEqual(output.shape, a_)
        # compare the actual values for a slice.
        __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]],
            dtype=tf.floataa,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class __lowerCamelCase:
    """simple docstring"""

    def __init__(
        self : Tuple,
        SCREAMING_SNAKE_CASE__ : str,
        SCREAMING_SNAKE_CASE__ : int = 13,
        SCREAMING_SNAKE_CASE__ : int = 64,
        SCREAMING_SNAKE_CASE__ : int = 2,
        SCREAMING_SNAKE_CASE__ : int = 3,
        SCREAMING_SNAKE_CASE__ : int = 3,
        SCREAMING_SNAKE_CASE__ : bool = True,
        SCREAMING_SNAKE_CASE__ : bool = True,
        SCREAMING_SNAKE_CASE__ : int = 128,
        SCREAMING_SNAKE_CASE__ : int = [16, 32, 64, 128],
        SCREAMING_SNAKE_CASE__ : int = 7,
        SCREAMING_SNAKE_CASE__ : int = 4,
        SCREAMING_SNAKE_CASE__ : int = 37,
        SCREAMING_SNAKE_CASE__ : str = "gelu",
        SCREAMING_SNAKE_CASE__ : float = 0.1,
        SCREAMING_SNAKE_CASE__ : float = 0.1,
        SCREAMING_SNAKE_CASE__ : int = 10,
        SCREAMING_SNAKE_CASE__ : float = 0.02,
        SCREAMING_SNAKE_CASE__ : int = 2,
        SCREAMING_SNAKE_CASE__ : int = 1,
        SCREAMING_SNAKE_CASE__ : int = 128,
        SCREAMING_SNAKE_CASE__ : List[int] = [2, 2, 2, 2],
        SCREAMING_SNAKE_CASE__ : int = 2,
        SCREAMING_SNAKE_CASE__ : int = 2,
    ) -> Tuple:
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = image_size
        lowerCAmelCase__ = patch_size
        lowerCAmelCase__ = num_channels
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout_prob
        lowerCAmelCase__ = attention_probs_dropout_prob
        lowerCAmelCase__ = type_sequence_label_size
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = encoder_stride
        lowerCAmelCase__ = num_attention_outputs
        lowerCAmelCase__ = embed_dim
        lowerCAmelCase__ = embed_dim + 1
        lowerCAmelCase__ = resolution
        lowerCAmelCase__ = depths
        lowerCAmelCase__ = hidden_sizes
        lowerCAmelCase__ = dim
        lowerCAmelCase__ = mlp_expansion_ratio

    def a ( self : int ) -> Optional[Any]:
        lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size], self.type_sequence_label_size)

        lowerCAmelCase__ = self.get_config()
        return config, pixel_values, labels

    def a ( self : int ) -> List[str]:
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=SCREAMING_SNAKE_CASE__,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def a ( self : Any, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
        lowerCAmelCase__ = TFEfficientFormerModel(config=SCREAMING_SNAKE_CASE__)
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__, training=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def a ( self : Tuple, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
        lowerCAmelCase__ = self.type_sequence_label_size
        lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__)
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__, training=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__)

        lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def a ( self : Dict ) -> Optional[Any]:
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = config_and_inputs
        lowerCAmelCase__ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class __lowerCamelCase ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ):
    """simple docstring"""

    snake_case__ = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    snake_case__ = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False
    snake_case__ = False

    def a ( self : List[str] ) -> int:
        lowerCAmelCase__ = TFEfficientFormerModelTester(self)
        lowerCAmelCase__ = ConfigTester(
            self, config_class=SCREAMING_SNAKE_CASE__, has_text_modality=SCREAMING_SNAKE_CASE__, hidden_size=37
        )

    def a ( self : Any ) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def a ( self : str ) -> Dict:
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def a ( self : Optional[Any] ) -> List[str]:
        pass

    def a ( self : Union[str, Any] ) -> Union[str, Any]:
        lowerCAmelCase__, lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ = [*signature.parameters.keys()]

            lowerCAmelCase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE__)
    def a ( self : List[str] ) -> Union[str, Any]:
        def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : List[str] ):
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__), training=SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCAmelCase__ = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(SCREAMING_SNAKE_CASE__), SCREAMING_SNAKE_CASE__)

            if hasattr(self.model_tester, "encoder_seq_length"):
                lowerCAmelCase__ = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    lowerCAmelCase__ = seq_length * self.model_tester.chunk_length
            else:
                lowerCAmelCase__ = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                lowerCAmelCase__ = outputs.decoder_hidden_states

                self.assertIsInstance(SCREAMING_SNAKE_CASE__, (list, tuple))
                self.assertEqual(len(SCREAMING_SNAKE_CASE__), SCREAMING_SNAKE_CASE__)
                lowerCAmelCase__ = getattr(self.model_tester, "seq_length", SCREAMING_SNAKE_CASE__)
                lowerCAmelCase__ = getattr(self.model_tester, "decoder_seq_length", SCREAMING_SNAKE_CASE__)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        lowerCAmelCase__, lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ = True

            check_hidden_states_output(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)

    def a ( self : Optional[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[int] = False ) -> Tuple:
        lowerCAmelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, return_labels=SCREAMING_SNAKE_CASE__)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def a ( self : List[Any] ) -> List[Any]:
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def a ( self : List[str] ) -> Union[str, Any]:
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__)

    def a ( self : str ) -> Any:
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)

    @slow
    def a ( self : Optional[Any] ) -> str:
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = TFEfficientFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)

    def a ( self : Optional[int] ) -> Optional[Any]:
        lowerCAmelCase__, lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = True

        lowerCAmelCase__ = getattr(self.model_tester, "seq_length", SCREAMING_SNAKE_CASE__)
        lowerCAmelCase__ = getattr(self.model_tester, "encoder_seq_length", SCREAMING_SNAKE_CASE__)
        lowerCAmelCase__ = getattr(self.model_tester, "key_length", SCREAMING_SNAKE_CASE__)
        lowerCAmelCase__ = getattr(self.model_tester, "chunk_length", SCREAMING_SNAKE_CASE__)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            lowerCAmelCase__ = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            lowerCAmelCase__ = False
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__), training=SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(SCREAMING_SNAKE_CASE__), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__), training=SCREAMING_SNAKE_CASE__)
            lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(SCREAMING_SNAKE_CASE__), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def a ( self : Union[str, Any] ) -> Any:
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        lowerCAmelCase__, lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            lowerCAmelCase__ = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=SCREAMING_SNAKE_CASE__)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__)
            self.assertTrue(outputs_dict is not None)


def _A ( ):
    """simple docstring"""
    lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def a ( self : Dict ) -> Dict:
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def a ( self : str ) -> Optional[Any]:
        lowerCAmelCase__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        lowerCAmelCase__ = self.default_image_processor
        lowerCAmelCase__ = prepare_img()
        lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__, return_tensors="tf")

        # forward pass
        lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__, training=SCREAMING_SNAKE_CASE__)

        # verify the logits
        lowerCAmelCase__ = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE__)

        lowerCAmelCase__ = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE__, atol=1e-4))

    @slow
    def a ( self : Tuple ) -> Union[str, Any]:
        lowerCAmelCase__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        lowerCAmelCase__ = self.default_image_processor
        lowerCAmelCase__ = prepare_img()
        lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__, return_tensors="tf")

        # forward pass
        lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__, training=SCREAMING_SNAKE_CASE__)

        # verify the logits
        lowerCAmelCase__ = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE__)

        lowerCAmelCase__ = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE__, atol=1e-4))
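# A minimal usage sketch of the pipeline exercised by the integration tests above, assuming the
# same "snap-research/efficientformer-l1-300" checkpoint; the image path is illustrative.
from PIL import Image
import tensorflow as tf
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
inputs = processor(images=Image.open("cats.png"), return_tensors="tf")  # illustrative image file
logits = model(**inputs, training=False).logits
predicted_class = int(tf.argmax(logits, axis=-1)[0])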
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" try: _UpperCAmelCase = float(UpperCamelCase__ ) except ValueError: raise ValueError("Please enter a valid number" ) _UpperCAmelCase = decimal - int(UpperCamelCase__ ) if fractional_part == 0: return int(UpperCamelCase__ ), 1 else: _UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] ) _UpperCAmelCase = int(decimal * (10**number_of_frac_digits) ) _UpperCAmelCase = 10**number_of_frac_digits _UpperCAmelCase , _UpperCAmelCase = denominator, numerator while True: _UpperCAmelCase = dividend % divisor if remainder == 0: break _UpperCAmelCase , _UpperCAmelCase = divisor, remainder _UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor return int(UpperCamelCase__ ), int(UpperCamelCase__ ) if __name__ == "__main__": print(f'''{decimal_to_fraction(2) = }''') print(f'''{decimal_to_fraction(89.0) = }''') print(f'''{decimal_to_fraction("67") = }''') print(f'''{decimal_to_fraction("45.0") = }''') print(f'''{decimal_to_fraction(1.5) = }''') print(f'''{decimal_to_fraction("6.25") = }''') print(f'''{decimal_to_fraction("78td") = }''')
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _snake_case ( snake_case__ : List[Any] ): A = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] A = True if 'large' in model_name or 'huge' in model_name else False A = True if 'large' in model_name or 'huge' in model_name else False A = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: A = [3, 3, 3, 3] A = [5, 5, 5, 5] elif "fl4" in model_name: A = [4, 4, 4, 4] A = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: A = [3, 3, 3, 3] if "lrf" in model_name: A = [3, 3, 3, 3] else: A = [2, 2, 2, 2] if "tiny" in model_name: A = 96 elif "small" in model_name: A = 96 elif "base" in model_name: A = 128 elif "large" in model_name: A = 192 elif "xlarge" in model_name: A = 256 elif "huge" in model_name: A = 352 # set label information A = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: A = 'imagenet-22k-id2label.json' else: A = 'imagenet-1k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = {v: k for k, v in idalabel.items()} A = FocalNetConfig( embed_dim=snake_case__ , depths=snake_case__ , focal_levels=snake_case__ , focal_windows=snake_case__ , use_conv_embed=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , use_post_layernorm=snake_case__ , use_layerscale=snake_case__ , ) return config def _snake_case ( snake_case__ : int ): if "patch_embed.proj" in name: A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: A = 'encoder.' + name if "encoder.layers" in name: A = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: A = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: A = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: A = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: A = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: A = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": A = 'layernorm.weight' if name == "norm.bias": A = 'layernorm.bias' if "head" in name: A = name.replace('head' , 'classifier' ) else: A = 'focalnet.' 
        A = 'focalnet.' + name

    return name


def _snake_case ( snake_case__ : str, snake_case__ : Any, snake_case__ : Dict=False ):
    # fmt: off
    A = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on

    A = model_name_to_url[model_name]
    print('Checkpoint URL: ', snake_case__)
    A = torch.hub.load_state_dict_from_url(snake_case__, map_location='cpu')['model']

    # rename keys
    for key in state_dict.copy().keys():
        A = state_dict.pop(snake_case__)
        A = val

    A = get_focalnet_config(snake_case__)
    A = FocalNetForImageClassification(snake_case__)
    model.eval()

    # load state dict
    model.load_state_dict(snake_case__)

    # verify conversion
    A = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    A = BitImageProcessor(
        do_resize=snake_case__,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=snake_case__,
        crop_size=224,
        do_normalize=snake_case__,
        image_mean=snake_case__,
        image_std=snake_case__,
    )
    A = Image.open(requests.get(snake_case__, stream=snake_case__).raw)
    A = processor(images=snake_case__, return_tensors='pt')

    A = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    A = image_transforms(snake_case__).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, snake_case__, atol=1e-4)

    A = model(**snake_case__)
    A = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.idalabel[predicted_class_idx])

    print('First values of logits:', outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        A = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        A = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        A = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        A = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        A = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        A = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], snake_case__, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(snake_case__)
        processor.save_pretrained(snake_case__)

    if push_to_hub:
        print(F'Pushing model and processor of {model_name} to the hub...')
        model.push_to_hub(F'{model_name}')
        processor.push_to_hub(F'{model_name}')


if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )

    _lowercase = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
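# Example invocation of the conversion script above; the file name is an assumption, while the
# flags come directly from the argparse definitions:
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub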
"""simple docstring""" from math import pi, sqrt def _snake_case ( snake_case__ : float ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(snake_case__ ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(snake_case__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): assert gamma(0.5 ) == sqrt(snake_case__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() _lowercase = 1.0 while num: _lowercase = float(input('''Gamma of: ''')) print(F"""gamma({num}) = {gamma(num)}""") print('''\nEnter 0 to exit...''')
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

snake_case_ : List[str] = logging.get_logger(__name__)

snake_case_ : Union[str, Any] = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
snake_case_ : int = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488,
    3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709,
    11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935,
    27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
snake_case_ : int = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183,
    4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635,
    15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464,
    31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on


class lowercase__ ( snake_case_ ):
    '''simple docstring'''

    _snake_case = '''whisper'''
    _snake_case = ['''past_key_values''']
    _snake_case = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        lowerCamelCase__=5_1_8_6_5,
        lowerCamelCase__=8_0,
        lowerCamelCase__=6,
        lowerCamelCase__=4,
        lowerCamelCase__=6,
        lowerCamelCase__=4,
        lowerCamelCase__=1_5_3_6,
        lowerCamelCase__=1_5_3_6,
        lowerCamelCase__=0.0,
        lowerCamelCase__=0.0,
        lowerCamelCase__=5_0_2_5_7,
        lowerCamelCase__=True,
        lowerCamelCase__=True,
        lowerCamelCase__="gelu",
        lowerCamelCase__=2_5_6,
        lowerCamelCase__=0.0,
        lowerCamelCase__=0.0,
        lowerCamelCase__=0.0,
        lowerCamelCase__=0.02,
        lowerCamelCase__=False,
        lowerCamelCase__=1_5_0_0,
        lowerCamelCase__=4_4_8,
        lowerCamelCase__=5_0_2_5_6,
        lowerCamelCase__=5_0_2_5_6,
        lowerCamelCase__=5_0_2_5_6,
        lowerCamelCase__=None,
        lowerCamelCase__=[2_2_0, 5_0_2_5_6],
        lowerCamelCase__=False,
        lowerCamelCase__=2_5_6,
        lowerCamelCase__=False,
        lowerCamelCase__=0.05,
        lowerCamelCase__=1_0,
        lowerCamelCase__=2,
        lowerCamelCase__=0.0,
        lowerCamelCase__=1_0,
        lowerCamelCase__=0,
        lowerCamelCase__=7,
        **lowerCamelCase__,
    ):
        '''simple docstring'''
        UpperCamelCase = vocab_size
        UpperCamelCase = num_mel_bins
        UpperCamelCase = d_model
        UpperCamelCase = encoder_layers
        UpperCamelCase = encoder_attention_heads
        UpperCamelCase = decoder_layers
        UpperCamelCase = decoder_attention_heads
        UpperCamelCase = decoder_ffn_dim
        UpperCamelCase = encoder_ffn_dim
        UpperCamelCase = dropout
        UpperCamelCase = attention_dropout
        UpperCamelCase = activation_dropout
        UpperCamelCase = activation_function
        UpperCamelCase = init_std
        UpperCamelCase = encoder_layerdrop
        UpperCamelCase = decoder_layerdrop
        UpperCamelCase = use_cache
        UpperCamelCase = encoder_layers
        UpperCamelCase = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCamelCase = max_source_positions
        UpperCamelCase = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        UpperCamelCase = classifier_proj_size
        UpperCamelCase = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCamelCase = apply_spec_augment
        UpperCamelCase = mask_time_prob
        UpperCamelCase = mask_time_length
        UpperCamelCase = mask_time_min_masks
        UpperCamelCase = mask_feature_prob
        UpperCamelCase = mask_feature_length
        UpperCamelCase = mask_feature_min_masks

        UpperCamelCase = median_filter_width

        super().__init__(
            pad_token_id=_snake_case,
            bos_token_id=_snake_case,
            eos_token_id=_snake_case,
            is_encoder_decoder=_snake_case,
            decoder_start_token_id=_snake_case,
            suppress_tokens=_snake_case,
            begin_suppress_tokens=_snake_case,
            **_snake_case,
        )


class lowercase__ ( snake_case_ ):
    '''simple docstring'''

    @property
    def UpperCAmelCase ( self ):
        '''simple docstring'''
        UpperCamelCase = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ]
        )
        if self.use_past:
            UpperCamelCase = {0: '''batch'''}
        else:
            UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(_snake_case, direction='''inputs''')

        return common_inputs

    def UpperCAmelCase (
        self,
        lowerCamelCase__,
        lowerCamelCase__ = -1,
        lowerCamelCase__ = -1,
        lowerCamelCase__ = False,
        lowerCamelCase__ = None,
        lowerCamelCase__ = 2_2_0_5_0,
        lowerCamelCase__ = 5.0,
        lowerCamelCase__ = 2_2_0,
    ):
        '''simple docstring'''
        UpperCamelCase = OrderedDict()
        UpperCamelCase = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=_snake_case,
            framework=_snake_case,
            sampling_rate=_snake_case,
            time_duration=_snake_case,
            frequency=_snake_case,
        )
        UpperCamelCase = encoder_inputs['''input_features'''].shape[2]
        UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
        UpperCamelCase = super().generate_dummy_inputs(
            preprocessor.tokenizer, _snake_case, _snake_case, _snake_case, _snake_case
        )
        UpperCamelCase = encoder_inputs.pop('''input_features''')
        UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''')

        if "past_key_values" in decoder_inputs:
            UpperCamelCase = decoder_inputs.pop('''past_key_values''')

        return dummy_inputs

    @property
    def UpperCAmelCase ( self ):
        '''simple docstring'''
        return 1e-3
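# A small sketch of the sequence-length bookkeeping in generate_dummy_inputs above. Whisper's
# convolutional front end downsamples the audio frames by a factor of two (an assumption about
# the model, not stated in this file), which is why the decoder-side dummy length is derived as
# encoder_sequence_length // 2 when past_key_values are used. The numbers are illustrative:
# 3000 frames map to 1500 positions, matching the max_source_positions default above.
encoder_sequence_length = 3000
use_past = True
decoder_seq_length = encoder_sequence_length // 2 if use_past else encoder_sequence_length
assert decoder_seq_length == 1500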
'''simple docstring'''
import contextlib
import os
import sqlitea

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def lowerCamelCase_ ( A_, A_ ):
    assert isinstance(A_, A_)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def lowerCamelCase_ ( A_, A_, A_, A_ ):
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        __lowerCamelCase = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=A_, keep_in_memory=A_
        ).read()
    _check_sql_dataset(A_, A_)


@require_sqlalchemy
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def lowerCamelCase_ ( A_, A_, A_, A_ ):
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    __lowerCamelCase = features.copy() if features else default_expected_features
    __lowerCamelCase = (
        Features({feature: Value(A_) for feature, dtype in features.items()}) if features is not None else None
    )
    __lowerCamelCase = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=A_, cache_dir=A_).read()
    _check_sql_dataset(A_, A_)


def lowerCamelCase_ ( A_ ):
    with contextlib.closing(sqlitea.connect(A_)) as con:
        __lowerCamelCase = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row


@require_sqlalchemy
def lowerCamelCase_ ( A_, A_, A_ ):
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = os.path.join(A_, '''tmp.sql''')
    __lowerCamelCase = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=A_).read()
    SqlDatasetWriter(A_, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()

    __lowerCamelCase = iter_sql_file(A_)
    __lowerCamelCase = iter_sql_file(A_)

    for rowa, rowa in zip(A_, A_):
        assert rowa == rowa


@require_sqlalchemy
def lowerCamelCase_ ( A_, A_, A_ ):
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = os.path.join(A_, '''tmp.sql''')
    __lowerCamelCase = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=A_).read()
    SqlDatasetWriter(A_, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()

    __lowerCamelCase = iter_sql_file(A_)
    __lowerCamelCase = iter_sql_file(A_)

    for rowa, rowa in zip(A_, A_):
        assert rowa == rowa


@require_sqlalchemy
def lowerCamelCase_ ( A_, A_, A_ ):
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = os.path.join(A_, '''tmp.sql''')
    __lowerCamelCase = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=A_).read()
    with pytest.raises(A_):
        SqlDatasetWriter(A_, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
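# A condensed round-trip sketch of the reader/writer pair exercised by the tests above
# (requires sqlalchemy, like the tests; the file path and the "dataset" table name are
# illustrative):
from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
SqlDatasetWriter(ds, "dataset", "sqlite:///roundtrip.sql", num_proc=1).write()
reloaded = SqlDatasetReader("dataset", "sqlite:///roundtrip.sql").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]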
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


UpperCAmelCase__ : str = logging.get_logger(__name__)


class lowerCAmelCase_ (a__ ):
    """simple docstring"""

    __UpperCamelCase : int = ['''input_features''', '''attention_mask''']

    def __init__(
        self,
        SCREAMING_SNAKE_CASE__=80,
        SCREAMING_SNAKE_CASE__=1_60_00,
        SCREAMING_SNAKE_CASE__=80,
        SCREAMING_SNAKE_CASE__=0.0,
        SCREAMING_SNAKE_CASE__=True,
        SCREAMING_SNAKE_CASE__=True,
        SCREAMING_SNAKE_CASE__=True,
        **SCREAMING_SNAKE_CASE__,
    ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            feature_size=SCREAMING_SNAKE_CASE__, sampling_rate=SCREAMING_SNAKE_CASE__, padding_value=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__
        )
        SCREAMING_SNAKE_CASE__ : Dict = num_mel_bins
        SCREAMING_SNAKE_CASE__ : Optional[Any] = do_ceptral_normalize
        SCREAMING_SNAKE_CASE__ : Optional[Any] = normalize_means
        SCREAMING_SNAKE_CASE__ : int = normalize_vars
        SCREAMING_SNAKE_CASE__ : Any = True

    def __magic_name__ (self, SCREAMING_SNAKE_CASE__, ) -> np.ndarray:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Any = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__).unsqueeze(0)
        SCREAMING_SNAKE_CASE__ : Optional[int] = ta_kaldi.fbank(
            SCREAMING_SNAKE_CASE__, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate
        )
        return features.numpy()

    @staticmethod
    def __magic_name__ (
        SCREAMING_SNAKE_CASE__,
        SCREAMING_SNAKE_CASE__,
        SCREAMING_SNAKE_CASE__ = True,
        SCREAMING_SNAKE_CASE__ = True,
        SCREAMING_SNAKE_CASE__ = 0.0,
    ) -> np.ndarray:
        """simple docstring"""
        if normalize_means:
            SCREAMING_SNAKE_CASE__ : Dict = x[:input_length].mean(axis=0)
            SCREAMING_SNAKE_CASE__ : List[Any] = np.subtract(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
        if normalize_vars:
            SCREAMING_SNAKE_CASE__ : List[str] = x[:input_length].std(axis=0)
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.divide(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)

        if input_length < x.shape[0]:
            SCREAMING_SNAKE_CASE__ : Tuple = padding_value

        # make sure array is in float32
        SCREAMING_SNAKE_CASE__ : List[Any] = x.astype(np.floataa)

        return x

    def __magic_name__ (self, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = None ) -> List[np.ndarray]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Any = (
            attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        )
        return [
            self.utterance_cmvn(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
        ]

    def __call__(
        self,
        SCREAMING_SNAKE_CASE__,
        SCREAMING_SNAKE_CASE__ = False,
        SCREAMING_SNAKE_CASE__ = None,
        SCREAMING_SNAKE_CASE__ = False,
        SCREAMING_SNAKE_CASE__ = None,
        SCREAMING_SNAKE_CASE__ = None,
        SCREAMING_SNAKE_CASE__ = None,
        SCREAMING_SNAKE_CASE__ = None,
        **SCREAMING_SNAKE_CASE__,
    ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug."""
            )

        SCREAMING_SNAKE_CASE__ : Any = isinstance(SCREAMING_SNAKE_CASE__, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        SCREAMING_SNAKE_CASE__ : List[str] = is_batched_numpy or (
            isinstance(SCREAMING_SNAKE_CASE__, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            SCREAMING_SNAKE_CASE__ : str = [np.asarray(SCREAMING_SNAKE_CASE__, dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE__, np.ndarray):
            SCREAMING_SNAKE_CASE__ : Dict = np.asarray(SCREAMING_SNAKE_CASE__, dtype=np.floataa)
        elif isinstance(SCREAMING_SNAKE_CASE__, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            SCREAMING_SNAKE_CASE__ : List[Any] = raw_speech.astype(np.floataa)

        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE__ : List[str] = [raw_speech]

        # extract fbank features
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE__) for waveform in raw_speech]

        # convert into correct format for padding
        SCREAMING_SNAKE_CASE__ : str = BatchFeature({"""input_features""": features})

        SCREAMING_SNAKE_CASE__ : int = self.pad(
            SCREAMING_SNAKE_CASE__,
            padding=SCREAMING_SNAKE_CASE__,
            max_length=SCREAMING_SNAKE_CASE__,
            truncation=SCREAMING_SNAKE_CASE__,
            pad_to_multiple_of=SCREAMING_SNAKE_CASE__,
            return_attention_mask=SCREAMING_SNAKE_CASE__,
            **SCREAMING_SNAKE_CASE__,
        )

        # make sure list is in array format
        SCREAMING_SNAKE_CASE__ : Any = padded_inputs.get("""input_features""")
        if isinstance(input_features[0], SCREAMING_SNAKE_CASE__):
            SCREAMING_SNAKE_CASE__ : Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE__, dtype=np.floataa) for feature in input_features]

        SCREAMING_SNAKE_CASE__ : List[Any] = padded_inputs.get("""attention_mask""")
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE__ : Tuple = [np.asarray(SCREAMING_SNAKE_CASE__, dtype=np.intaa) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            SCREAMING_SNAKE_CASE__ : str = (
                np.array(SCREAMING_SNAKE_CASE__, dtype=np.intaa)
                if self._get_padding_strategies(SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            SCREAMING_SNAKE_CASE__ : Optional[Any] = self.normalize(
                padded_inputs["""input_features"""], attention_mask=SCREAMING_SNAKE_CASE__
            )

        if return_tensors is not None:
            SCREAMING_SNAKE_CASE__ : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE__)

        return padded_inputs
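# A plain-numpy sketch of the utterance-level CMVN performed by the static method above, with
# both normalization flags folded together; it mirrors the original behaviour (including the
# absence of an epsilon guard on the standard deviation) but is a standalone illustration, not
# the extractor's code path.
import numpy as np


def utterance_cmvn_sketch(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = np.array(x, dtype=np.float64)
    x[:input_length] = x[:input_length] - x[:input_length].mean(axis=0)  # normalize means
    x[:input_length] = x[:input_length] / x[:input_length].std(axis=0)   # normalize vars
    x[input_length:] = padding_value                                     # reset padded frames
    return x.astype(np.float32)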
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def lowercase_ ( _snake_case ,_snake_case=7 ): SCREAMING_SNAKE_CASE__ : Dict = None if token is not None: SCREAMING_SNAKE_CASE__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) SCREAMING_SNAKE_CASE__ : List[str] = """636036""" SCREAMING_SNAKE_CASE__ : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' SCREAMING_SNAKE_CASE__ : str = requests.get(_snake_case ,headers=_snake_case ).json() return result["workflow_runs"] def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = get_daily_ci_runs(_snake_case ) SCREAMING_SNAKE_CASE__ : int = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": SCREAMING_SNAKE_CASE__ : Union[str, Any] = workflow_run["""id"""] break return workflow_run_id def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Dict = get_last_daily_ci_runs(_snake_case ) if workflow_run_id is not None: SCREAMING_SNAKE_CASE__ : Tuple = get_artifacts_links(worflow_run_id=_snake_case ,token=_snake_case ) for artifact_name in artifact_names: if artifact_name in artifacts_links: SCREAMING_SNAKE_CASE__ : Tuple = artifacts_links[artifact_name] download_artifact( artifact_name=_snake_case ,artifact_url=_snake_case ,output_dir=_snake_case ,token=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): get_last_daily_ci_artifacts(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = {} for artifact_name in artifact_names: SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_snake_case ,f'''{artifact_name}.zip''' ) if os.path.isfile(_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = {} with zipfile.ZipFile(_snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(_snake_case ): # read the file with z.open(_snake_case ) as f: SCREAMING_SNAKE_CASE__ : List[Any] = f.read().decode("""UTF-8""" ) return results
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


__lowercase : List[str] = logging.get_logger(__name__)

__lowercase : Optional[int] = {
    '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
    '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
    '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
    '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
    '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
    '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}


class _A ( snake_case ):
    '''simple docstring'''

    __lowerCamelCase : List[Any] = '''codegen'''
    __lowerCamelCase : List[str] = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        SCREAMING_SNAKE_CASE_=50400,
        SCREAMING_SNAKE_CASE_=2048,
        SCREAMING_SNAKE_CASE_=2048,
        SCREAMING_SNAKE_CASE_=4096,
        SCREAMING_SNAKE_CASE_=28,
        SCREAMING_SNAKE_CASE_=16,
        SCREAMING_SNAKE_CASE_=64,
        SCREAMING_SNAKE_CASE_=None,
        SCREAMING_SNAKE_CASE_="gelu_new",
        SCREAMING_SNAKE_CASE_=0.0,
        SCREAMING_SNAKE_CASE_=0.0,
        SCREAMING_SNAKE_CASE_=0.0,
        SCREAMING_SNAKE_CASE_=1E-5,
        SCREAMING_SNAKE_CASE_=0.02,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=50256,
        SCREAMING_SNAKE_CASE_=50256,
        SCREAMING_SNAKE_CASE_=False,
        **SCREAMING_SNAKE_CASE_,
    ):
        '''simple docstring'''
        snake_case : Dict = vocab_size
        snake_case : List[str] = n_ctx
        snake_case : List[Any] = n_positions
        snake_case : Dict = n_embd
        snake_case : Union[str, Any] = n_layer
        snake_case : Tuple = n_head
        snake_case : List[str] = n_inner
        snake_case : Union[str, Any] = rotary_dim
        snake_case : Optional[Any] = activation_function
        snake_case : List[Any] = resid_pdrop
        snake_case : Tuple = embd_pdrop
        snake_case : int = attn_pdrop
        snake_case : str = layer_norm_epsilon
        snake_case : Union[str, Any] = initializer_range
        snake_case : Dict = use_cache

        snake_case : int = bos_token_id
        snake_case : str = eos_token_id

        super().__init__(
            bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, tie_word_embeddings=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_
        )


class _A ( snake_case ):
    '''simple docstring'''
    def __init__(
        self,
        SCREAMING_SNAKE_CASE_,
        SCREAMING_SNAKE_CASE_ = "default",
        SCREAMING_SNAKE_CASE_ = None,
        SCREAMING_SNAKE_CASE_ = False,
    ):
        '''simple docstring'''
        super().__init__(SCREAMING_SNAKE_CASE_, task=SCREAMING_SNAKE_CASE_, patching_specs=SCREAMING_SNAKE_CASE_, use_past=SCREAMING_SNAKE_CASE_)
        if not getattr(self._config, """pad_token_id""", SCREAMING_SNAKE_CASE_):
            # TODO: how to do that better?
            snake_case : Optional[Any] = 0

    @property
    def snake_case_ ( self ):
        '''simple docstring'''
        snake_case : int = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_, direction="""inputs""")
            snake_case : int = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def snake_case_ ( self ):
        '''simple docstring'''
        return self._config.n_layer

    @property
    def snake_case_ ( self ):
        '''simple docstring'''
        return self._config.n_head

    def snake_case_ (
        self,
        SCREAMING_SNAKE_CASE_,
        SCREAMING_SNAKE_CASE_ = -1,
        SCREAMING_SNAKE_CASE_ = -1,
        SCREAMING_SNAKE_CASE_ = False,
        SCREAMING_SNAKE_CASE_ = None,
    ):
        '''simple docstring'''
        snake_case : Tuple = super(SCREAMING_SNAKE_CASE_, self).generate_dummy_inputs(
            SCREAMING_SNAKE_CASE_, batch_size=SCREAMING_SNAKE_CASE_, seq_length=SCREAMING_SNAKE_CASE_, is_pair=SCREAMING_SNAKE_CASE_, framework=SCREAMING_SNAKE_CASE_
        )

        # We need to order the input in the way they appears in the forward()
        snake_case : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                snake_case, snake_case : Optional[Any] = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                snake_case : str = seqlen + 2
                snake_case : Optional[Any] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                snake_case : Any = [
                    (torch.zeros(SCREAMING_SNAKE_CASE_), torch.zeros(SCREAMING_SNAKE_CASE_)) for _ in range(self.num_layers)
                ]

        snake_case : List[str] = common_inputs["""attention_mask"""]
        if self.use_past:
            snake_case : Tuple = ordered_inputs["""attention_mask"""].dtype
            snake_case : Union[str, Any] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_)], dim=1
            )

        return ordered_inputs

    @property
    def snake_case_ ( self ):
        '''simple docstring'''
        return 13
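# A shape sketch for the dummy past_key_values constructed above: each of the n_layer entries
# is a (key, value) pair of shape (batch, n_head, past_length, n_embd // n_head). The numbers
# below use the CodeGen defaults from the config class (n_head=16, n_embd=4096, n_layer=28);
# "seqlen + 2" mirrors the comment above about not reusing the input length.
batch, seqlen = 2, 8
n_head, n_embd, n_layer = 16, 4096, 28
past_key_values_length = seqlen + 2
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
assert past_shape == (2, 16, 10, 256)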
'''simple docstring'''
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class __lowercase ( __magic_name__, unittest.TestCase ):
    _a = PriorTransformer
    _a = """hidden_states"""

    @property
    def UpperCamelCase__ ( self ) -> int:
        __a = 4
        __a = 8
        __a = 7
        __a = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase)
        __a = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase)
        __a = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def UpperCamelCase__ ( self, UpperCamelCase=0 ) -> Union[str, Any]:
        torch.manual_seed(UpperCamelCase)
        __a = 4
        __a = 8
        __a = 7
        __a = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase)
        __a = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase)
        __a = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def UpperCamelCase__ ( self ) -> List[str]:
        return (4, 8)

    @property
    def UpperCamelCase__ ( self ) -> str:
        return (4, 8)

    def UpperCamelCase__ ( self ) -> Dict:
        __a = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        __a = self.dummy_input
        return init_dict, inputs_dict

    def UpperCamelCase__ ( self ) -> List[Any]:
        __a, __a = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=UpperCamelCase
        )
        self.assertIsNotNone(UpperCamelCase)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(UpperCamelCase)
        __a = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def UpperCamelCase__ ( self ) -> Optional[Any]:
        __a, __a = self.prepare_init_args_and_inputs_for_common()
        __a = self.model_class(**UpperCamelCase)
        __a = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __a = [*signature.parameters.keys()]

        __a = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], UpperCamelCase)

    def UpperCamelCase__ ( self ) -> str:
        __a = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        __a = model.to(UpperCamelCase)

        if hasattr(UpperCamelCase, 'set_default_attn_processor'):
            model.set_default_attn_processor()

        __a = self.get_dummy_seed_input()
        with torch.no_grad():
            __a = model(**UpperCamelCase)[0]

        __a = output[0, :5].flatten().cpu()
        print(UpperCamelCase)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
__a = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(UpperCamelCase , UpperCamelCase , rtol=1e-2 ) ) @slow class __lowercase ( unittest.TestCase ): def UpperCamelCase__ ( self , UpperCamelCase=1 , UpperCamelCase=768 , UpperCamelCase=77 , UpperCamelCase=0 ) -> List[str]: torch.manual_seed(UpperCamelCase ) __a = batch_size __a = embedding_dim __a = num_embeddings __a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase ) __a = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase ) __a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def UpperCamelCase__ ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: __a = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(UpperCamelCase ) __a = self.get_dummy_seed_input(seed=UpperCamelCase ) with torch.no_grad(): __a = model(**UpperCamelCase )[0] assert list(sample.shape ) == [1, 768] __a = sample[0, :8].flatten().cpu() print(UpperCamelCase ) __a = torch.tensor(UpperCamelCase ) assert torch_all_close(UpperCamelCase , UpperCamelCase , atol=1e-3 )
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = "" __UpperCAmelCase = "" __UpperCAmelCase = "" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def A_ ( ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE = get_dataset(snake_case__ , snake_case__ ) print('Processing...' ) SCREAMING_SNAKE_CASE = update_image_and_anno(snake_case__ , snake_case__ , snake_case__ ) for index, image in enumerate(snake_case__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE = random_chars(3_2 ) SCREAMING_SNAKE_CASE = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] SCREAMING_SNAKE_CASE = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(f'''Success {index+1}/{len(snake_case__ )} with {file_name}''' ) SCREAMING_SNAKE_CASE = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(snake_case__ ) with open(f'''/{file_root}.txt''' , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def A_ ( lowercase_ , lowercase_ ) ->Tuple: """simple docstring""" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for label_file in glob.glob(os.path.join(snake_case__ , '*.txt' ) ): SCREAMING_SNAKE_CASE = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(snake_case__ ) as in_file: SCREAMING_SNAKE_CASE = in_file.readlines() SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(snake_case__ ) labels.append(snake_case__ ) return img_paths, labels def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 ) ->str: """simple docstring""" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for idx in range(len(snake_case__ ) ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = img_list[idx] path_list.append(snake_case__ ) SCREAMING_SNAKE_CASE = anno_list[idx] SCREAMING_SNAKE_CASE = cva.imread(snake_case__ ) if flip_type == 1: SCREAMING_SNAKE_CASE = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: SCREAMING_SNAKE_CASE = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE = cva.flip(snake_case__ , snake_case__ ) for bbox in img_annos: SCREAMING_SNAKE_CASE = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(snake_case__ ) new_imgs_list.append(snake_case__ ) return new_imgs_list, new_annos_lists, path_list def A_ ( lowercase_ = 3_2 ) ->str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE = ascii_lowercase + digits return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) ) if __name__ == "__main__": main() print("DONE ✅")
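# --- usage sketch (not part of the original file above) ---
# A minimal, self-contained check of the annotation update performed by
# update_image_and_anno(): for YOLO-format boxes (class, x_center, y_center,
# width, height), a horizontal flip maps x_center -> 1 - x_center and a
# vertical flip maps y_center -> 1 - y_center. The helper name below is ours.
def flip_yolo_bbox(bbox: list, flip_type: int = 1) -> list:
    cls, x_c, y_c, w, h = bbox
    if flip_type == 1:  # horizontal flip
        return [cls, 1 - x_c, y_c, w, h]
    return [cls, x_c, 1 - y_c, w, h]  # vertical flip


assert flip_yolo_bbox([0, 0.25, 0.5, 0.1, 0.2], flip_type=1) == [0, 0.75, 0.5, 0.1, 0.2]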
import argparse from collections import defaultdict import yaml __UpperCAmelCase = "docs/source/en/_toctree.yml" def A_ ( lowercase_ ) ->Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE = defaultdict(lowercase_ ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(lowercase_ ) > 1: raise ValueError( f'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(lowercase_ , key=lambda lowercase_ : s["title"].lower() ) def A_ ( lowercase_=False ) ->List[Any]: """simple docstring""" with open(lowercase_ , encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE = content[api_idx]['sections'] # Then to the model doc SCREAMING_SNAKE_CASE = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE = api_doc[model_idx]['sections'] SCREAMING_SNAKE_CASE = [(idx, section) for idx, section in enumerate(lowercase_ ) if 'sections' in section] SCREAMING_SNAKE_CASE = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE = modality_doc['sections'] SCREAMING_SNAKE_CASE = clean_model_doc_toc(lowercase_ ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE = True if overwrite: SCREAMING_SNAKE_CASE = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE = model_doc SCREAMING_SNAKE_CASE = api_doc with open(lowercase_ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __UpperCAmelCase = parser.parse_args() check_model_doc(args.fix_and_overwrite)
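# --- usage sketch (not part of the original file above) ---
# Hypothetical before/after for clean_model_doc_toc(): entries sharing a
# "local" key (with identical titles) collapse into one, and the result is
# sorted case-insensitively by title.
_model_doc = [
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
    {"local": "bert", "title": "BERT"},
]
# clean_model_doc_toc(_model_doc) would return:
# [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]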
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition (FSNER) model."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score start and end tokens of query entities against the support contexts."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Embed the query batch and the flattened support batch with BERT.
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # Offset of this query's support block within the flattened batch.
            s = 0 if i == 0 else support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            # Score every query token against the support start/end markers.
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
"""simple docstring""" def lowerCamelCase__ ( UpperCAmelCase_ = 60_08_51_47_51_43 )-> int: """simple docstring""" try: UpperCamelCase = int(UpperCAmelCase_ ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) UpperCamelCase = 2 UpperCamelCase = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 UpperCamelCase = i while n % i == 0: UpperCamelCase = n // i i += 1 return int(UpperCAmelCase_ ) if __name__ == "__main__": print(F'''{solution() = }''')
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __UpperCAmelCase ( __A ): """simple docstring""" _lowerCamelCase = (DPMSolverSinglestepScheduler,) _lowerCamelCase = (("""num_inference_steps""", 25),) def snake_case_ ( self , **__A ): __a = { """num_train_timesteps""": 1000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, """prediction_type""": """epsilon""", """thresholding""": False, """sample_max_value""": 1.0, """algorithm_type""": """dpmsolver++""", """solver_type""": """midpoint""", """lambda_min_clipped""": -float("""inf""" ), """variance_type""": None, } config.update(**__A ) return config def snake_case_ ( self , __A=0 , **__A ): __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __A ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config(**__A ) __a = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals __a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) __a = scheduler_class.from_pretrained(__A ) new_scheduler.set_timesteps(__A ) # copy over dummy past residuals __a = dummy_past_residuals[: new_scheduler.config.solver_order] __a , __a = sample, sample for t in range(__A , time_step + scheduler.config.solver_order + 1 ): __a = scheduler.step(__A , __A , __A , **__A ).prev_sample __a = new_scheduler.step(__A , __A , __A , **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case_ ( self ): pass def snake_case_ ( self , __A=0 , **__A ): __a = dict(self.forward_default_kwargs ) __a = kwargs.pop("""num_inference_steps""" , __A ) __a = self.dummy_sample __a = 0.1 * sample __a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a = self.get_scheduler_config() __a = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals (must be after setting timesteps) __a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) __a = scheduler_class.from_pretrained(__A ) # copy over dummy past residuals new_scheduler.set_timesteps(__A ) # copy over dummy past residual (must be after setting timesteps) __a = dummy_past_residuals[: new_scheduler.config.solver_order] __a = scheduler.step(__A , __A , __A , **__A ).prev_sample __a = new_scheduler.step(__A , __A , __A , **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case_ ( self , __A=None , **__A ): if scheduler is None: __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__A ) __a = scheduler_class(**__A ) __a = self.scheduler_classes[0] __a = self.get_scheduler_config(**__A ) __a = scheduler_class(**__A ) __a = 10 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__A ) for i, t in enumerate(scheduler.timesteps ): __a = model(__A , __A ) __a = scheduler.step(__A , __A , __A ).prev_sample return sample def snake_case_ ( self ): __a = 
DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __a = 50 __a = self.dummy_model() __a = self.dummy_sample_deter scheduler.set_timesteps(__A ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): __a = model(__A , __A ) __a = scheduler.step(__A , __A , __A ).prev_sample __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def snake_case_ ( self ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__A ) def snake_case_ ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults __a = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __a = self.full_loop(scheduler=__A ) __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 __a = DEISMultistepScheduler.from_config(scheduler.config ) __a = DPMSolverMultistepScheduler.from_config(scheduler.config ) __a = UniPCMultistepScheduler.from_config(scheduler.config ) __a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __a = self.full_loop(scheduler=__A ) __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def snake_case_ ( self ): self.check_over_configs(thresholding=__A ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__A , prediction_type=__A , sample_max_value=__A , algorithm_type="""dpmsolver++""" , solver_order=__A , solver_type=__A , ) def snake_case_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__A ) def snake_case_ ( self ): for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__A , solver_type=__A , prediction_type=__A , algorithm_type=__A , ) __a = self.full_loop( solver_order=__A , solver_type=__A , prediction_type=__A , algorithm_type=__A , ) assert not torch.isnan(__A ).any(), "Samples have nan numbers" def snake_case_ ( self ): self.check_over_configs(lower_order_final=__A ) self.check_over_configs(lower_order_final=__A ) def snake_case_ ( self ): self.check_over_configs(lambda_min_clipped=-float("""inf""" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case_ ( self ): self.check_over_configs(variance_type=__A ) self.check_over_configs(variance_type="""learned_range""" ) def snake_case_ ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__A , time_step=0 ) def snake_case_ ( self ): __a = self.full_loop() __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def snake_case_ ( self ): __a = self.full_loop(use_karras_sigmas=__A ) __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def snake_case_ ( self ): __a = self.full_loop(prediction_type="""v_prediction""" ) __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def snake_case_ ( self ): __a = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=__A ) __a = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def snake_case_ ( self ): __a = self.scheduler_classes[0] __a = self.get_scheduler_config(thresholding=__A , dynamic_thresholding_ratio=0 ) __a = scheduler_class(**__A ) __a = 10 __a = 
self.dummy_model() __a = self.dummy_sample_deter.half() scheduler.set_timesteps(__A ) for i, t in enumerate(scheduler.timesteps ): __a = model(__A , __A ) __a = scheduler.step(__A , __A , __A ).prev_sample assert sample.dtype == torch.floataa
from __future__ import annotations from decimal import Decimal from numpy import array def a (lowerCAmelCase__ ): __a = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowerCAmelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix __a = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements __a = [[0.0, 0.0], [0.0, 0.0]] __a , __a = matrix[1][1], matrix[0][0] __a , __a = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowerCAmelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowerCAmelCase__ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule __a = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix __a = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] __a = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) __a = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) __a = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) __a = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) __a = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) __a = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) __a = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) __a = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) __a = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) __a = array(lowerCAmelCase__ ) for i in range(3 ): for j in range(3 ): __a = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix __a = array(lowerCAmelCase__ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowerCAmelCase__ ) # Calculate the inverse of the matrix return [[float(d(lowerCAmelCase__ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
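# --- usage sketch (not part of the original file above) ---
# Cross-checking the 2x2 branch (the inverse routine is named `a` in this
# dump) against numpy.linalg.inv; the inverse of [[4, 7], [2, 6]] is
# [[0.6, -0.7], [-0.2, 0.4]] since det = 4*6 - 2*7 = 10.
import numpy as np

_m = [[4.0, 7.0], [2.0, 6.0]]
assert np.allclose(np.array(a(_m)), np.linalg.inv(np.array(_m)))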
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self ): __a = tempfile.mkdtemp() __a = BlipImageProcessor() __a = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) __a = BlipProcessor(__A , __A ) processor.save_pretrained(self.tmpdirname ) def snake_case_ ( self , **__A ): return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).tokenizer def snake_case_ ( self , **__A ): return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).image_processor def snake_case_ ( self ): shutil.rmtree(self.tmpdirname ) def snake_case_ ( self ): __a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __a = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self ): __a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __a = self.get_image_processor(do_normalize=__A , padding_value=1.0 ) __a = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __A ) def snake_case_ ( self ): __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipProcessor(tokenizer=__A , image_processor=__A ) __a = self.prepare_image_inputs() __a = image_processor(__A , return_tensors="""np""" ) __a = processor(images=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self ): __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipProcessor(tokenizer=__A , image_processor=__A ) __a = """lower newer""" __a = processor(text=__A ) __a = tokenizer(__A , return_token_type_ids=__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self ): __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipProcessor(tokenizer=__A , image_processor=__A ) __a = """lower newer""" __a = self.prepare_image_inputs() __a = processor(text=__A , images=__A ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__A ): processor() def snake_case_ ( self ): __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipProcessor(tokenizer=__A , image_processor=__A ) __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a = processor.batch_decode(__A ) __a = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def snake_case_ ( self ): __a = self.get_image_processor() __a = self.get_tokenizer() __a = BlipProcessor(tokenizer=__A , image_processor=__A ) __a = """lower newer""" 
__a = self.prepare_image_inputs() __a = processor(text=__A , images=__A ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params SCREAMING_SNAKE_CASE = getLogger(__name__) SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu' def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 8 , lowerCAmelCase__ = DEFAULT_DEVICE , lowerCAmelCase__=False , lowerCAmelCase__="summarization" , lowerCAmelCase__=None , **lowerCAmelCase__ , ): __a = Path(lowerCAmelCase__ ).open("""w""" , encoding="""utf-8""" ) __a = str(lowerCAmelCase__ ) __a = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) if fpaa: __a = model.half() __a = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. __a = time.time() # update config with task specific params use_task_specific_params(lowerCAmelCase__ , lowerCAmelCase__ ) if prefix is None: __a = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(lowerCAmelCase__ , lowerCAmelCase__ ) ) ): __a = [prefix + text for text in examples_chunk] __a = tokenizer(lowerCAmelCase__ , return_tensors="""pt""" , truncation=lowerCAmelCase__ , padding="""longest""" ).to(lowerCAmelCase__ ) __a = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **lowerCAmelCase__ , ) __a = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() __a = int(time.time() - start_time ) # seconds __a = len(lowerCAmelCase__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def a (): return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def a (lowerCAmelCase__=True ): __a = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=lowerCAmelCase__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=lowerCAmelCase__ , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=lowerCAmelCase__ , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=lowerCAmelCase__ , default="""summarization""" , help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=lowerCAmelCase__ , default=8 , required=lowerCAmelCase__ , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""How many observations. 
Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=lowerCAmelCase__ , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __a , __a = parser.parse_known_args() __a = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase__ ) if parsed_args and verbose: print(f'''parsed the following generate kwargs: {parsed_args}''' ) __a = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __a = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=lowerCAmelCase__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can't mix --fp16 and --device cpu""" ) __a = generate_summaries_or_translations( lowerCAmelCase__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **lowerCAmelCase__ , ) if args.reference_path is None: return {} # Compute scores __a = calculate_bleu if """translation""" in args.task else calculate_rouge __a = [x.rstrip() for x in open(args.save_path ).readlines()] __a = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(lowerCAmelCase__ )] __a = score_fn(lowerCAmelCase__ , lowerCAmelCase__ ) scores.update(lowerCAmelCase__ ) if args.dump_args: scores.update(lowerCAmelCase__ ) if args.info: __a = args.info if verbose: print(lowerCAmelCase__ ) if args.score_path is not None: json.dump(lowerCAmelCase__ , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
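# --- usage sketch (not part of the original file above) ---
# The analogous summarization invocation (the MT variant is documented at the
# bottom of the script itself); paths and model name are placeholders:
#   python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source \
#       $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target \
#       --score_path $save_dir/test_rouge.json --task summarization --bs 16 --fp16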
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , __UpperCAmelCase , ) class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = RobertaConfig snake_case__ = "roberta" def __init__( self : List[str] ,lowerCamelCase__ : Dict ): super().__init__(lowerCamelCase__ ) UpperCAmelCase__ = RobertaEmbeddings(lowerCamelCase__ ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , __UpperCAmelCase , ) class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = RobertaConfig snake_case__ = "roberta" def __init__( self : Union[str, Any] ,lowerCamelCase__ : Optional[int] ): super().__init__(lowerCamelCase__ ) UpperCAmelCase__ = config.num_labels UpperCAmelCase__ = config.num_hidden_layers UpperCAmelCase__ = DeeRobertaModel(lowerCamelCase__ ) UpperCAmelCase__ = nn.Dropout(config.hidden_dropout_prob ) UpperCAmelCase__ = nn.Linear(config.hidden_size ,self.config.num_labels ) @add_start_docstrings_to_model_forward(lowerCamelCase__ ) def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : int=-1 ,lowerCamelCase__ : Any=False ,): UpperCAmelCase__ = self.num_layers try: UpperCAmelCase__ = self.roberta( lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,position_ids=lowerCamelCase__ ,head_mask=lowerCamelCase__ ,inputs_embeds=lowerCamelCase__ ,) UpperCAmelCase__ = outputs[1] UpperCAmelCase__ = self.dropout(lowerCamelCase__ ) UpperCAmelCase__ = self.classifier(lowerCamelCase__ ) UpperCAmelCase__ = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCAmelCase__ = e.message UpperCAmelCase__ = e.exit_layer UpperCAmelCase__ = outputs[0] if not self.training: UpperCAmelCase__ = entropy(lowerCamelCase__ ) UpperCAmelCase__ = [] UpperCAmelCase__ = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCAmelCase__ = MSELoss() UpperCAmelCase__ = loss_fct(logits.view(-1 ) ,labels.view(-1 ) ) else: UpperCAmelCase__ = CrossEntropyLoss() UpperCAmelCase__ = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) # work with highway exits UpperCAmelCase__ = [] for highway_exit in outputs[-1]: UpperCAmelCase__ = highway_exit[0] if not self.training: highway_logits_all.append(lowerCamelCase__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCAmelCase__ = MSELoss() UpperCAmelCase__ = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) ) else: UpperCAmelCase__ = CrossEntropyLoss() UpperCAmelCase__ = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) highway_losses.append(lowerCamelCase__ ) if 
train_highway: UpperCAmelCase__ = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCAmelCase__ = (loss,) + outputs if not self.training: UpperCAmelCase__ = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCAmelCase__ = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : Any = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class snake_case ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" snake_case__ = XLMProphetNetTokenizer snake_case__ = False snake_case__ = True def __lowerCAmelCase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = '[PAD]' UpperCAmelCase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ ) def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'[PAD]' ) self.assertEqual(vocab_keys[1] ,'[CLS]' ) self.assertEqual(vocab_keys[-1] ,'j' ) self.assertEqual(len(lowerCamelCase__ ) ,1_012 ) def __lowerCAmelCase ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size ,1_012 ) def __lowerCAmelCase ( self : str ): UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ ) UpperCAmelCase__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] ,) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] ,) @cached_property def __lowerCAmelCase ( self : Dict ): return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = 'Hello World!' 
UpperCAmelCase__ = [35_389, 6_672, 49, 2] self.assertListEqual(lowerCamelCase__ ,self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def __lowerCAmelCase ( self : List[str] ): # fmt: off UpperCAmelCase__ = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def UpperCamelCase ( snake_case__ : List[Any] ) -> Optional[Any]: if is_torch_version('<' , '2.0.0' ) or not hasattr(snake_case__ , '_dynamo' ): return False return isinstance(snake_case__ , torch._dynamo.eval_frame.OptimizedModule ) def UpperCamelCase ( snake_case__ : str , snake_case__ : bool = True ) -> Tuple: UpperCamelCase : Tuple = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCamelCase : Optional[Any] = is_compiled_module(snake_case__ ) if is_compiled: UpperCamelCase : List[str] = model UpperCamelCase : Tuple = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(snake_case__ , snake_case__ ): UpperCamelCase : List[str] = model.module if not keep_fpaa_wrapper: UpperCamelCase : Optional[int] = getattr(snake_case__ , 'forward' ) UpperCamelCase : int = model.__dict__.pop('_original_forward' , snake_case__ ) if original_forward is not None: while hasattr(snake_case__ , '__wrapped__' ): UpperCamelCase : Any = forward.__wrapped__ if forward == original_forward: break UpperCamelCase : Any = forward if getattr(snake_case__ , '_converted_to_transformer_engine' , snake_case__ ): convert_model(snake_case__ , to_transformer_engine=snake_case__ ) if is_compiled: UpperCamelCase : Tuple = model UpperCamelCase : int = compiled_model return model def UpperCamelCase ( ) -> Any: PartialState().wait_for_everyone() def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Dict ) -> int: if PartialState().distributed_type == DistributedType.TPU: xm.save(snake_case__ , snake_case__ ) elif PartialState().local_process_index == 0: torch.save(snake_case__ , snake_case__ ) @contextmanager def UpperCamelCase ( **snake_case__ : str ) -> str: for key, value in kwargs.items(): UpperCamelCase : int = str(snake_case__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def UpperCamelCase ( snake_case__ : str ) -> Any: if not hasattr(snake_case__ , '__qualname__' ) and not hasattr(snake_case__ , '__name__' ): UpperCamelCase : Tuple = getattr(snake_case__ , '__class__' , snake_case__ ) if hasattr(snake_case__ , '__qualname__' ): return obj.__qualname__ if hasattr(snake_case__ , '__name__' ): return obj.__name__ return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Any , snake_case__ : Optional[Any] ) -> Tuple: for key, value in source.items(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Optional[Any] = destination.setdefault(snake_case__ , {} ) merge_dicts(snake_case__ , snake_case__ ) else: UpperCamelCase : List[Any] = value return destination def UpperCamelCase ( snake_case__ : int = None ) -> bool: if port is None: UpperCamelCase : Union[str, Any] = 29500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
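# --- usage sketch (not part of the original file above) ---
# The final helper above answers "is this local TCP port already taken?",
# defaulting to 29500, torch.distributed's default master port. A standalone
# equivalent (name is ours; the identifiers in this dump are obfuscated):
import socket


def is_port_in_use(port: int = 29500) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0


print(is_port_in_use())  # True only if something is listening on 29500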
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class for Megatron-BERT models."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
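# --- usage sketch (not part of the original file above) ---
# This config ships with transformers as MegatronBertConfig, so it can be
# exercised directly from the installed package:
from transformers import MegatronBertConfig

cfg = MegatronBertConfig(hidden_size=768, num_hidden_layers=12)
print(cfg.model_type, cfg.hidden_size)  # megatron-bert 768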
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer UpperCamelCase = ["gpt2"] UpperCamelCase = "gpt2" if is_tf_available(): class lowerCAmelCase_ ( tf.Module ): """simple docstring""" def __init__( self :Optional[int] , lowerCamelCase__ :Tuple ): super().__init__() UpperCamelCase__ :Dict = tokenizer UpperCamelCase__ :Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ ) UpperCamelCase__ :Tuple = TFGPTaLMHeadModel.from_config(lowerCamelCase__ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def __a ( self :Dict , lowerCamelCase__ :Any ): UpperCamelCase__ :Optional[Any] = self.tokenizer(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = tokenized["""input_ids"""].to_tensor() UpperCamelCase__ :Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCamelCase__ :Optional[Any] = self.model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )["""logits"""] return outputs @require_tf @require_keras_nlp class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __a ( self :str ): super().setUp() UpperCamelCase__ :int = [GPTaTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCamelCase__ :Optional[int] = [TFGPTaTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCamelCase__ :List[str] = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] UpperCamelCase__ :Any = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __a ( self :Dict ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCamelCase__ :Union[str, Any] = tokenizer([test_inputs] , return_tensors="""tf""" ) UpperCamelCase__ :int = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCamelCase__ :List[Any] = python_outputs[key].numpy() UpperCamelCase__ :List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowerCamelCase__ , tf.intaa ) == tf_outputs_values ) ) @slow def __a ( self :Any ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase__ :Any = tf.function(lowerCamelCase__ ) for test_inputs in self.test_sentences: UpperCamelCase__ :Optional[Any] = tf.constant(lowerCamelCase__ ) UpperCamelCase__ :int = compiled_tokenizer(lowerCamelCase__ ) UpperCamelCase__ :Dict = tf_tokenizer(lowerCamelCase__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __a ( self :Dict ): for tf_tokenizer in self.tf_tokenizers: 
UpperCamelCase__ :Tuple = ModelToSave(tokenizer=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCamelCase__ :Optional[Any] = model.serving(lowerCamelCase__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCamelCase__ :List[str] = Path(lowerCamelCase__ ) / """saved.model""" tf.saved_model.save(lowerCamelCase__ , lowerCamelCase__ , signatures={"""serving_default""": model.serving} ) UpperCamelCase__ :List[Any] = tf.saved_model.load(lowerCamelCase__ ) UpperCamelCase__ :List[str] = loaded_model.signatures["""serving_default"""](lowerCamelCase__ )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def __a ( self :Tuple ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase__ :Dict = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCamelCase__ :Dict = tf_tokenizer(lowerCamelCase__ ) # Build model with some sample inputs UpperCamelCase__ :Union[str, Any] = tf_tokenizer.get_config() UpperCamelCase__ :List[Any] = TFGPTaTokenizer.from_config(lowerCamelCase__ ) UpperCamelCase__ :str = model_from_config(lowerCamelCase__ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def __a ( self :Optional[Any] ): for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCamelCase__ :Optional[int] = 12_31_23 for max_length in [3, 5, 10_24]: UpperCamelCase__ :Dict = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCamelCase__ :List[str] = tf_tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ ) UpperCamelCase__ :List[Any] = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
from manim import * class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __a ( self :Optional[int] ): UpperCamelCase__ :Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) UpperCamelCase__ :int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCamelCase__ :Dict = [mem.copy() for i in range(6 )] UpperCamelCase__ :Any = [mem.copy() for i in range(6 )] UpperCamelCase__ :List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Dict = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Union[str, Any] = Text("""CPU""" , font_size=24 ) UpperCamelCase__ :str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) UpperCamelCase__ :List[str] = [mem.copy() for i in range(1 )] UpperCamelCase__ :Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :Optional[Any] = Text("""GPU""" , font_size=24 ) UpperCamelCase__ :Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) gpu.align_to(lowerCamelCase__ , lowerCamelCase__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = [mem.copy() for i in range(6 )] UpperCamelCase__ :Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) UpperCamelCase__ :str = Text("""Model""" , font_size=24 ) UpperCamelCase__ :Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , ) UpperCamelCase__ :Tuple = MarkupText( f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) UpperCamelCase__ :Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase__ :Tuple = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) ) self.add(lowerCamelCase__ ) UpperCamelCase__ :Any = [] UpperCamelCase__ :List[Any] = [] UpperCamelCase__ :int = [] for i, rect in enumerate(lowerCamelCase__ ): UpperCamelCase__ :int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 ) cpu_target.move_to(lowerCamelCase__ ) cpu_target.generate_target() UpperCamelCase__ :Any = 0.46 / 4 UpperCamelCase__ :Optional[Any] = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 ) cpu_targs.append(lowerCamelCase__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) ) second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(*lowerCamelCase__ ) 
self.wait()
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "LayoutLMv3ImageProcessor" lowerCamelCase_ = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self :Dict , __A :Union[str, Any]=None , __A :Union[str, Any]=None , **__A :Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :Dict , __A :List[str] , __A :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __A :Union[List[List[int]], List[List[List[int]]]] = None , __A :Optional[Union[List[int], List[List[int]]]] = None , __A :bool = True , __A :Union[bool, str, PaddingStrategy] = False , __A :Union[bool, str, TruncationStrategy] = None , __A :Optional[int] = None , __A :int = 0 , __A :Optional[int] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = True , __A :Optional[Union[str, TensorType]] = None , **__A :Optional[Any] , ) -> BatchEncoding: """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor SCREAMING_SNAKE_CASE__ = self.image_processor(images=__A , return_tensors=__A ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = [text] # add batch dimension (as the image processor always adds a batch dimension) SCREAMING_SNAKE_CASE__ = features["""words"""] SCREAMING_SNAKE_CASE__ = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) # add pixel values SCREAMING_SNAKE_CASE__ = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: SCREAMING_SNAKE_CASE__ = self.get_overflowing_images(__A , encoded_inputs["""overflow_to_sample_mapping"""] ) SCREAMING_SNAKE_CASE__ = 
images return encoded_inputs def _snake_case ( self :str , __A :Optional[int] , __A :int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__A ) != len(__A ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f''' {len(__A )} and {len(__A )}''' ) return images_with_overflow def _snake_case ( self :List[Any] , *__A :int , **__A :Optional[int] ) -> Any: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Tuple , *__A :int , **__A :str ) -> Optional[Any]: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :str ) -> Tuple: """simple docstring""" return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _snake_case ( self :List[Any] ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Dict ) -> Any: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
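# A hedged usage sketch for the processor above, assuming the public
# "microsoft/layoutlmv3-base" checkpoint; the blank image is a stand-in,
# and the default apply_ocr=True path also needs pytesseract installed.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
document = Image.new("RGB", (224, 224), "white")  # placeholder document image
encoding = processor(document, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']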
6
from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : Union[str, Any] = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _UpperCAmelCase ( _A ): """simple docstring""" A = '''vivit''' def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=32 , _lowerCAmelCase=[2, 16, 16] , _lowerCAmelCase=3 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3_072 , _lowerCAmelCase="gelu_fast" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1e-06 , _lowerCAmelCase=True , **_lowerCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Any = hidden_size lowerCAmelCase__ :Union[str, Any] = num_hidden_layers lowerCAmelCase__ :Dict = num_attention_heads lowerCAmelCase__ :int = intermediate_size lowerCAmelCase__ :List[Any] = hidden_act lowerCAmelCase__ :str = hidden_dropout_prob lowerCAmelCase__ :Tuple = attention_probs_dropout_prob lowerCAmelCase__ :Optional[int] = initializer_range lowerCAmelCase__ :Optional[int] = layer_norm_eps lowerCAmelCase__ :Optional[int] = image_size lowerCAmelCase__ :Any = num_frames lowerCAmelCase__ :List[str] = tubelet_size lowerCAmelCase__ :List[str] = num_channels lowerCAmelCase__ :str = qkv_bias super().__init__(**_lowerCAmelCase )
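# A small sketch instantiating the configuration above; with no arguments the
# defaults mirror the google/vivit-b-16x2-kinetics400 checkpoint listed in the
# pretrained map.
from transformers import VivitConfig

config = VivitConfig()
print(config.num_frames, config.tubelet_size, config.hidden_act)  # 32 [2, 16, 16] gelu_fast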
145
0
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __a (lowerCamelCase ): __a : "DiagonalGaussianDistribution" class __a (lowerCamelCase , lowerCamelCase ): __a : str = True @register_to_config def __init__( self : str , __magic_name__ : int = 3 , __magic_name__ : int = 3 , __magic_name__ : Tuple[str] = ("DownEncoderBlock2D",) , __magic_name__ : Tuple[str] = ("UpDecoderBlock2D",) , __magic_name__ : Tuple[int] = (64,) , __magic_name__ : int = 1 , __magic_name__ : str = "silu" , __magic_name__ : int = 4 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : float = 0.1_8_2_1_5 , ) -> int: """simple docstring""" super().__init__() # pass init params to Encoder UpperCAmelCase_ : Union[str, Any] = Encoder( in_channels=__magic_name__ , out_channels=__magic_name__ , down_block_types=__magic_name__ , block_out_channels=__magic_name__ , layers_per_block=__magic_name__ , act_fn=__magic_name__ , norm_num_groups=__magic_name__ , double_z=__magic_name__ , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=__magic_name__ , out_channels=__magic_name__ , up_block_types=__magic_name__ , block_out_channels=__magic_name__ , layers_per_block=__magic_name__ , norm_num_groups=__magic_name__ , act_fn=__magic_name__ , ) UpperCAmelCase_ : str = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : Optional[Any] = nn.Convad(__magic_name__ , __magic_name__ , 1 ) UpperCAmelCase_ : str = False UpperCAmelCase_ : List[str] = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Dict = self.config.sample_size UpperCAmelCase_ : List[Any] = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Dict = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : str = 0.2_5 def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=False ) -> Dict: """simple docstring""" if isinstance(__magic_name__ , (Encoder, Decoder) ): UpperCAmelCase_ : List[str] = value def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : bool = True ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[int] = use_tiling def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" self.enable_tiling(__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = True def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict[str, AttentionProcessor]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} def fn_recursive_add_processors(__magic_name__ : str , __magic_name__ : torch.nn.Module , __magic_name__ : Dict[str, AttentionProcessor] ): if hasattr(__magic_name__ , '''set_processor''' ): UpperCAmelCase_ : Any = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , 
__magic_name__ , __magic_name__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__magic_name__ , __magic_name__ , __magic_name__ ) return processors def UpperCAmelCase__ ( self : int , __magic_name__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = len(self.attn_processors.keys() ) if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != count: raise ValueError( F"""A dict of processors was passed, but the number of processors {len(__magic_name__ )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__magic_name__ : str , __magic_name__ : torch.nn.Module , __magic_name__ : List[str] ): if hasattr(__magic_name__ , '''set_processor''' ): if not isinstance(__magic_name__ , __magic_name__ ): module.set_processor(__magic_name__ ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __magic_name__ , __magic_name__ ) for name, module in self.named_children(): fn_recursive_attn_processor(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> AutoencoderKLOutput: """simple docstring""" if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__magic_name__ , return_dict=__magic_name__ ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Optional[int] = [self.encoder(__magic_name__ ) for x_slice in x.split(1 )] UpperCAmelCase_ : Union[str, Any] = torch.cat(__magic_name__ ) else: UpperCAmelCase_ : List[str] = self.encoder(__magic_name__ ) UpperCAmelCase_ : Dict = self.quant_conv(__magic_name__ ) UpperCAmelCase_ : Any = DiagonalGaussianDistribution(__magic_name__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__magic_name__ , return_dict=__magic_name__ ) UpperCAmelCase_ : Tuple = self.post_quant_conv(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = self.decoder(__magic_name__ ) if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ ) @apply_forward_hook def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self._decode(__magic_name__ ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Any = torch.cat(__magic_name__ ) else: UpperCAmelCase_ : Dict = self._decode(__magic_name__ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> List[Any]: """simple docstring""" 
UpperCAmelCase_ : int = min(a.shape[2] , b.shape[2] , __magic_name__ ) for y in range(__magic_name__ ): UpperCAmelCase_ : Optional[int] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , __magic_name__ ) for x in range(__magic_name__ ): UpperCAmelCase_ : Optional[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def UpperCAmelCase__ ( self : str , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> AutoencoderKLOutput: """simple docstring""" UpperCAmelCase_ : List[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Union[str, Any] = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : List[Any] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : Any = [] for i in range(0 , x.shape[2] , __magic_name__ ): UpperCAmelCase_ : int = [] for j in range(0 , x.shape[3] , __magic_name__ ): UpperCAmelCase_ : str = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : str = self.encoder(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = self.quant_conv(__magic_name__ ) row.append(__magic_name__ ) rows.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = [] for i, row in enumerate(__magic_name__ ): UpperCAmelCase_ : Tuple = [] for j, tile in enumerate(__magic_name__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] , __magic_name__ , __magic_name__ ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , __magic_name__ , __magic_name__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__magic_name__ , dim=3 ) ) UpperCAmelCase_ : List[str] = torch.cat(__magic_name__ , dim=2 ) UpperCAmelCase_ : List[str] = DiagonalGaussianDistribution(__magic_name__ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__magic_name__ ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" UpperCAmelCase_ : Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Union[str, Any] = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , __magic_name__ ): UpperCAmelCase_ : int = [] for j in range(0 , z.shape[3] , __magic_name__ ): UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Union[str, Any] = self.post_quant_conv(__magic_name__ ) UpperCAmelCase_ : Any = self.decoder(__magic_name__ ) row.append(__magic_name__ ) rows.append(__magic_name__ ) UpperCAmelCase_ : Optional[int] = [] for i, row in enumerate(__magic_name__ ): UpperCAmelCase_ : int = [] for j, tile in enumerate(__magic_name__ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Tuple = self.blend_v(rows[i - 1][j] , __magic_name__ , __magic_name__ ) if j > 0: UpperCAmelCase_ : str = self.blend_h(row[j - 1] , __magic_name__ , __magic_name__ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__magic_name__ , dim=3 ) ) UpperCAmelCase_ : List[Any] = torch.cat(__magic_name__ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ ) def UpperCAmelCase__ ( self : int , __magic_name__ : torch.FloatTensor , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = sample UpperCAmelCase_ : Any = self.encode(__magic_name__ ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=__magic_name__ ) else: UpperCAmelCase_ : Tuple = posterior.mode() UpperCAmelCase_ : str = self.decode(__magic_name__ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__magic_name__ )
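# A minimal tiling sketch for the autoencoder above, written against the real
# diffusers AutoencoderKL API (the method names above were lost to renaming);
# the "stabilityai/sd-vae-ft-mse" weights are an assumption for illustration.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()  # route large inputs through the overlapped-tile encode/decode path
with torch.no_grad():
    image = torch.randn(1, 3, 1024, 1024)  # big enough to trigger the tiled path
    latents = vae.encode(image).latent_dist.sample()
    recon = vae.decode(latents).sample
print(recon.shape)  # torch.Size([1, 3, 1024, 1024])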
644
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
644
1
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
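# A quick sanity check of the sort above; pigeonhole sort runs in
# O(n + value_range) time, so it shines when the value range is small.
assert pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert pigeon_sort([-3, -1, -2]) == [-3, -2, -1]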
256
import os import sys import unittest UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path UpperCamelCase_ = os.path.join(git_repo_path, "src", "transformers") UpperCamelCase_ = "\n{0} = None\n" UpperCamelCase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" UpperCamelCase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" A : int = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(snake_case_ ) A : Any = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(snake_case_ , '''tokenizers''' ) A : Optional[Any] = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(snake_case_ , '''tensorflow_text''' ) A : Dict = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(snake_case_ , '''sentencepiece_and_tokenizers''' ) A : int = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(snake_case_ , '''sentencepiece_and_tensorflow_text''' ) A : List[Any] = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(snake_case_ , '''sentencepiece_and_tokenizers_and_vision''' ) def _UpperCAmelCase ( self : int ): """simple docstring""" A : Optional[int] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , snake_case_ ) self.assertIn('''tensorflow_text''' , snake_case_ ) self.assertIn('''sentencepiece_and_tokenizers''' , snake_case_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def _UpperCAmelCase ( self : Dict ): """simple docstring""" A : Optional[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(snake_case_ , '''\nCONSTANT = None\n''' ) A : Optional[int] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( snake_case_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) A : Optional[int] = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' A : Union[str, Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(snake_case_ , snake_case_ ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" A : Tuple = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' A : Optional[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , snake_case_ )
256
1
from importlib import import_module from .logging import get_logger __lowerCamelCase : List[Any] = get_logger(__name__) class _lowercase : def __init__( self , a , a=None ): snake_case__ : str =attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , snake_case_ , getattr(snake_case_ , snake_case_ ) ) snake_case__ : Tuple =module._original_module if isinstance(snake_case_ , _PatchedModuleObj ) else module class _lowercase : _a : Any = [] def __init__( self , a , a , a , a=None ): snake_case__ : List[str] =obj snake_case__ : int =target snake_case__ : Dict =new snake_case__ : Tuple =target.split(""".""" )[0] snake_case__ : Optional[int] ={} snake_case__ : Optional[Any] =attrs or [] def __enter__( self ): *snake_case__ , snake_case__ : List[str] =self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(snake_case_ ) ): try: snake_case__ : Any =import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): snake_case__ : Any =getattr(self.obj , snake_case_ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(snake_case_ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): snake_case__ : Optional[int] =obj_attr # patch at top level setattr(self.obj , snake_case_ , _PatchedModuleObj(snake_case_ , attrs=self.attrs ) ) snake_case__ : Tuple =getattr(self.obj , snake_case_ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(snake_case_ , snake_case_ , _PatchedModuleObj(getattr(snake_case_ , snake_case_ , snake_case_ ) , attrs=self.attrs ) ) snake_case__ : Dict =getattr(snake_case_ , snake_case_ ) # finally set the target attribute setattr(snake_case_ , snake_case_ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: snake_case__ : int =getattr(import_module(""".""".join(snake_case_ ) ) , snake_case_ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , snake_case_ ) is attr_value: snake_case__ : List[Any] =getattr(self.obj , snake_case_ ) setattr(self.obj , snake_case_ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" snake_case__ : List[Any] =globals()["""__builtins__"""][target_attr] setattr(self.obj , snake_case_ , self.new ) else: raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." 
) def __exit__( self , *a ): for attr in list(self.original ): setattr(self.obj , snake_case_ , self.original.pop(snake_case_ ) ) def lowercase__ ( self ): self.__enter__() self._active_patches.append(self ) def lowercase__ ( self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
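# A usage sketch for the context-manager patcher above. Renaming collapsed
# both class names to `_lowercase`, while the bodies still call
# `_PatchedModuleObj`; so read the first class as `_PatchedModuleObj` and the
# second as the patcher (`patch_submodule` in the datasets library — assumed
# here). With the first name restored, the later definition works as follows:
import shutil

def fake_join(*parts):
    return "|".join(parts)

with _lowercase(shutil, "os.path.join", fake_join):
    # shutil's view of os.path.join is swapped for the stub inside the block
    assert shutil.os.path.join("a", "b") == "a|b"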
720
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; the deprecated `fit_generator` is gone
    # from recent TensorFlow releases.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)

    # training_set.class_indices
    # The sigmoid output is a probability in (0, 1), so threshold it instead of
    # comparing for exact equality with 0 or 1.
    if result[0][0] < 0.5:
        prediction = "Normal"
    else:
        prediction = "Abnormality detected"
448
0
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
121
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
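# A minimal sketch pairing the configuration above with its model class; the
# defaults (24 layers, hidden size 1024, vocab 29056) give a randomly
# initialised encoder roughly the size of the 345M-parameter Megatron-BERT.
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig()
model = MegatronBertModel(config)
print(model.config.num_hidden_layers, model.config.hidden_size)  # 24 1024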
696
0
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: int
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
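# A quick worked example under the functions above: 10,000 at a 0.05% daily
# rate for 30 days accrues 10000 * 0.0005 * 30 = 150.0 in simple interest.
print(simple_interest(10_000, 0.0005, 30))  # 150.0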
720
import os


def solution():
    """Find the greatest product of four adjacent numbers (right, down, or
    along either diagonal) in the 20x20 grid stored next to this file."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
490
0
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
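# The supported import path going forward, exactly as the deprecation message
# above instructs:
from diffusers import StableDiffusionControlNetPipeline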
112
import pickle import numpy as np from matplotlib import pyplot as plt class __lowerCAmelCase : def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=0.2 , snake_case=0.2 ) -> Any: """simple docstring""" a__ : List[Any] = bp_numa a__ : str = bp_numa a__ : Any = bp_numa a__ : Tuple = conva_get[:2] a__ : Optional[int] = conva_get[2] a__ : Any = size_pa a__ : Dict = rate_w a__ : Tuple = rate_t a__ : int = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] a__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a__ : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a__ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 a__ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1 a__ : int = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , snake_case ) -> int: """simple docstring""" a__ : str = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(snake_case , "wb" ) as f: pickle.dump(snake_case , snake_case ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , snake_case ) -> List[Any]: """simple docstring""" with open(snake_case , "rb" ) as f: a__ : List[str] = pickle.load(snake_case ) # noqa: S301 a__ : int = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) a__ : str = model_dic.get("size_pooling1" ) a__ : Tuple = model_dic.get("num_bp1" ) a__ : Optional[int] = model_dic.get("num_bp2" ) a__ : List[str] = model_dic.get("num_bp3" ) a__ : int = model_dic.get("rate_weight" ) a__ : int = model_dic.get("rate_thre" ) # create model instance a__ : Tuple = CNN(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) # modify model parameter a__ : Tuple = model_dic.get("w_conv1" ) a__ : Union[str, Any] = model_dic.get("wkj" ) a__ : List[str] = model_dic.get("vji" ) a__ : Tuple = model_dic.get("thre_conv1" ) a__ : List[str] = model_dic.get("thre_bp2" ) a__ : Optional[Any] = model_dic.get("thre_bp3" ) return conv_ins def _snake_case ( self , snake_case ) -> Optional[int]: """simple docstring""" return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , snake_case ) -> str: """simple docstring""" return round(snake_case , 3 ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]: """simple docstring""" a__ : Union[str, Any] = convs[0] a__ : int = convs[1] a__ : Optional[int] = np.shape(snake_case )[0] # get the data slice of original image data, data_focus a__ : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , snake_case ): for j_focus in range(0 , size_data - size_conv + 1 , snake_case ): a__ : str = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(snake_case ) # calculate the feature map of every single kernel, and saved as list of matrix a__ : Tuple = [] a__ : Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(snake_case ): a__ : List[str] = [] for i_focus in range(len(snake_case ) ): a__ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) 
featuremap.append(self.sig(snake_case ) ) a__ : Optional[int] = np.asmatrix(snake_case ).reshape( snake_case , snake_case ) data_featuremap.append(snake_case ) # expanding the data slice to One dimenssion a__ : int = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(snake_case ) ) a__ : Dict = np.asarray(snake_case ) return focus_list, data_featuremap def _snake_case ( self , snake_case , snake_case , snake_case="average_pool" ) -> Tuple: """simple docstring""" a__ : Dict = len(featuremaps[0] ) a__ : Optional[int] = int(size_map / size_pooling ) a__ : Any = [] for i_map in range(len(snake_case ) ): a__ : List[str] = featuremaps[i_map] a__ : Optional[Any] = [] for i_focus in range(0 , snake_case , snake_case ): for j_focus in range(0 , snake_case , snake_case ): a__ : Tuple = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(snake_case ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(snake_case ) ) a__ : Dict = np.asmatrix(snake_case ).reshape(snake_case , snake_case ) featuremap_pooled.append(snake_case ) return featuremap_pooled def _snake_case ( self , snake_case ) -> Optional[Any]: """simple docstring""" a__ : Optional[Any] = [] for i in range(len(snake_case ) ): a__ : int = np.shape(data[i] ) a__ : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) a__ : str = data_listed.getA().tolist()[0] data_expanded.extend(snake_case ) a__ : Any = np.asarray(snake_case ) return data_expanded def _snake_case ( self , snake_case ) -> str: """simple docstring""" a__ : Dict = np.asarray(snake_case ) a__ : List[Any] = np.shape(snake_case ) a__ : Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any: """simple docstring""" a__ : Dict = [] a__ : List[Any] = 0 for i_map in range(snake_case ): a__ : Tuple = np.ones((size_map, size_map) ) for i in range(0 , snake_case , snake_case ): for j in range(0 , snake_case , snake_case ): a__ : int = pd_pool[ i_pool ] a__ : Any = i_pool + 1 a__ : Optional[Any] = np.multiply( snake_case , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(snake_case ) return pd_all def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=bool ) -> Optional[int]: """simple docstring""" print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(snake_case )) ) print((" - - Shape: Teach_Data ", np.shape(snake_case )) ) a__ : List[Any] = 0 a__ : Any = [] a__ : Dict = 10_000 while rp < n_repeat and mse >= error_accuracy: a__ : Optional[Any] = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(snake_case ) ): # print('------------Learning Image: %d--------------'%p) a__ : Optional[int] = np.asmatrix(datas_train[p] ) a__ : Optional[Any] = np.asarray(datas_teach[p] ) a__ , a__ : Tuple = self.convolute( snake_case , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : List[Any] = self.pooling(snake_case , self.size_poolinga ) a__ : str = np.shape(snake_case ) a__ : List[Any] = self._expand(snake_case ) a__ : Union[str, Any] = data_bp_input a__ : Optional[Any] = np.dot(snake_case , self.vji.T ) - self.thre_bpa a__ : Optional[int] = self.sig(snake_case ) a__ : Union[str, Any] = np.dot(snake_case , self.wkj.T ) - self.thre_bpa 
a__ : Dict = self.sig(snake_case ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- a__ : str = np.multiply( (data_teach - bp_outa) , np.multiply(snake_case , (1 - bp_outa) ) ) a__ : List[Any] = np.multiply( np.dot(snake_case , self.wkj ) , np.multiply(snake_case , (1 - bp_outa) ) ) a__ : List[str] = np.dot(snake_case , self.vji ) a__ : List[str] = pd_i_all / (self.size_poolinga * self.size_poolinga) a__ : Tuple = pd_conva_pooled.T.getA().tolist() a__ : List[str] = self._calculate_gradient_from_pool( snake_case , snake_case , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): a__ : Dict = self._expand_mat(pd_conva_all[k_conv] ) a__ : Optional[int] = self.rate_weight * np.dot(snake_case , snake_case ) a__ : Dict = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) a__ : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer a__ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight a__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight a__ : List[str] = self.thre_bpa - pd_k_all * self.rate_thre a__ : int = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image a__ : int = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) a__ : Union[str, Any] = rp + 1 a__ : List[Any] = error_count / patterns all_mse.append(snake_case ) def draw_error(): a__ : List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(snake_case , "+-" ) plt.plot(snake_case , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(snake_case , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , snake_case ) -> Any: """simple docstring""" a__ : int = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(snake_case )) ) for p in range(len(snake_case ) ): a__ : Union[str, Any] = np.asmatrix(datas_test[p] ) a__ , a__ : Union[str, Any] = self.convolute( snake_case , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : List[Any] = self.pooling(snake_case , self.size_poolinga ) a__ : List[str] = self._expand(snake_case ) a__ : Union[str, Any] = data_bp_input a__ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa a__ : str = self.sig(snake_case ) a__ : Dict = bp_outa * self.wkj.T - self.thre_bpa a__ : Optional[int] = self.sig(snake_case ) produce_out.extend(bp_outa.getA().tolist() ) a__ : Optional[int] = [list(map(self.do_round , snake_case ) ) for each in produce_out] return np.asarray(snake_case ) def _snake_case ( self , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : Dict = np.asmatrix(snake_case ) a__ , a__ : Optional[int] = self.convolute( snake_case , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : Any = self.pooling(snake_case , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
112
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : List[str] = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase ( a_ ): """simple docstring""" A : List[str] = "table-transformer" A : Tuple = ["past_key_values"] A : Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[Any] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[Any]=1_0_0 , UpperCAmelCase_ : Union[str, Any]=6 , UpperCAmelCase_ : Union[str, Any]=2_0_4_8 , UpperCAmelCase_ : Optional[Any]=8 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : Optional[Any]=2_0_4_8 , UpperCAmelCase_ : List[Any]=8 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : str="relu" , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[Any]="sine" , UpperCAmelCase_ : Union[str, Any]="resnet50" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=0.1 , **UpperCAmelCase_ : List[str] , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.') a : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): a : List[Any] = backbone_config.get('model_type') a : Optional[int] = CONFIG_MAPPING[backbone_model_type] a : Union[str, Any] = config_class.from_dict(UpperCAmelCase_) # set timm attributes to None a : int = None, None, None a : List[str] = use_timm_backbone a : Optional[Any] = backbone_config a : List[str] = num_channels a : List[str] = num_queries a : Union[str, Any] = d_model a : Union[str, Any] = encoder_ffn_dim a : Any = encoder_layers a : Optional[Any] = encoder_attention_heads a : Any = decoder_ffn_dim a : Dict = decoder_layers a : Dict = decoder_attention_heads a : Union[str, Any] = dropout a : Optional[int] = attention_dropout a : Dict = activation_dropout a : Dict = activation_function a : str = init_std a : Dict = init_xavier_std a : Tuple = encoder_layerdrop a : Any = decoder_layerdrop a : Tuple = encoder_layers a : Union[str, Any] = auxiliary_loss a : Tuple = position_embedding_type a : List[str] = backbone a : Union[str, Any] = use_pretrained_backbone a : List[str] = dilation # Hungarian matcher a : Tuple = class_cost a : Optional[int] = bbox_cost a : int = giou_cost # Loss coefficients a : Any = mask_loss_coefficient a : Tuple = dice_loss_coefficient a : Any = bbox_loss_coefficient a : List[Any] = giou_loss_coefficient a : List[str] = eos_coefficient super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int]): """simple docstring""" return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE_ ( self : List[str]): """simple docstring""" return self.d_model class UpperCamelCase ( a_ ): """simple docstring""" A : Dict = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" return 1e-5 @property def SCREAMING_SNAKE_CASE_ ( self : List[Any]): """simple docstring""" return 1_2
718
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : Tuple = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCamelCase ( a_ ): """simple docstring""" A : Dict = "yolos" def __init__( self : str , UpperCAmelCase_ : List[Any]=7_6_8 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Dict=3_0_7_2 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[str]=1e-12 , UpperCAmelCase_ : Union[str, Any]=[5_1_2, 8_6_4] , UpperCAmelCase_ : str=1_6 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : int=1_0_0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , **UpperCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__(**UpperCAmelCase_) a : Union[str, Any] = hidden_size a : int = num_hidden_layers a : Dict = num_attention_heads a : Dict = intermediate_size a : int = hidden_act a : Union[str, Any] = hidden_dropout_prob a : Any = attention_probs_dropout_prob a : Dict = initializer_range a : Union[str, Any] = layer_norm_eps a : str = image_size a : Any = patch_size a : Union[str, Any] = num_channels a : int = qkv_bias a : Union[str, Any] = num_detection_tokens a : Optional[int] = use_mid_position_embeddings a : str = auxiliary_loss # Hungarian matcher a : Optional[Any] = class_cost a : Union[str, Any] = bbox_cost a : List[str] = giou_cost # Loss coefficients a : Any = bbox_loss_coefficient a : Optional[Any] = giou_loss_coefficient a : Union[str, Any] = eos_coefficient class UpperCamelCase ( a_ ): """simple docstring""" A : Optional[int] = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE_ ( self : Tuple): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" return 1e-4 @property def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" return 1_2
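# A small sketch reading the fields above from the public hustvl/yolos-small
# checkpoint; num_detection_tokens is the number of [DET] queries, i.e. the
# number of boxes the model predicts per image.
from transformers import YolosConfig

config = YolosConfig.from_pretrained("hustvl/yolos-small")
print(config.num_detection_tokens, config.image_size)  # 100 [512, 864], matching the defaults above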
610
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = '▁' SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', 'tokenizer_config_file': 'tokenizer_config.json', } SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json', }, 'spm_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_config_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json', }, } SCREAMING_SNAKE_CASE = { 'facebook/m2m100_418M': 1024, } # fmt: off SCREAMING_SNAKE_CASE = { 'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'], 'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] } class A_ ( __lowercase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[int] = ["input_ids", "attention_mask"] _SCREAMING_SNAKE_CASE : List[int] = [] _SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self , _A , _A , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<pad>" , _A="<unk>" , _A="m2m100" , _A = None , _A=8 , **_A , ) -> None: """simple docstring""" _UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _UpperCAmelCase : int = language_codes _UpperCAmelCase : int = FAIRSEQ_LANGUAGE_CODES[language_codes] _UpperCAmelCase : Tuple = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code} _UpperCAmelCase : Tuple = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ self.get_lang_token(_A) for lang_code in fairseq_language_code if self.get_lang_token(_A) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_A , tgt_lang=_A , bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , language_codes=_A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_A , **_A , ) _UpperCAmelCase : Union[str, Any] = vocab_file _UpperCAmelCase : Dict = load_json(_A) _UpperCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()} _UpperCAmelCase : Optional[Any] = spm_file _UpperCAmelCase : Any = load_spm(_A , self.sp_model_kwargs) _UpperCAmelCase : 
Optional[int] = len(self.encoder) _UpperCAmelCase : List[Any] = { self.get_lang_token(_A): self.encoder_size + i for i, lang_code in enumerate(_A) } _UpperCAmelCase : Union[str, Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_A)} _UpperCAmelCase : Optional[int] = {v: k for k, v in self.lang_token_to_id.items()} _UpperCAmelCase : Union[str, Any] = src_lang if src_lang is not None else '''en''' _UpperCAmelCase : Dict = tgt_lang _UpperCAmelCase : str = self.get_lang_id(self._src_lang) self.set_src_lang_special_tokens(self._src_lang) _UpperCAmelCase : int = num_madeup_words @property def snake_case__ ( self) -> int: """simple docstring""" return len(self.encoder) + len(self.lang_token_to_id) @property def snake_case__ ( self) -> str: """simple docstring""" return self._src_lang @src_lang.setter def snake_case__ ( self , _A) -> None: """simple docstring""" _UpperCAmelCase : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def snake_case__ ( self , _A) -> List[str]: """simple docstring""" return self.sp_model.encode(_A , out_type=_A) def snake_case__ ( self , _A) -> List[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_A , self.encoder[self.unk_token]) def snake_case__ ( self , _A) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_A , self.unk_token) def snake_case__ ( self , _A) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = [] _UpperCAmelCase : List[str] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A) + token _UpperCAmelCase : List[str] = [] else: current_sub_tokens.append(_A) out_string += self.sp_model.decode(_A) return out_string.strip() def snake_case__ ( self , _A , _A = None , _A = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A) _UpperCAmelCase : Tuple = [1] * len(self.prefix_tokens) _UpperCAmelCase : List[str] = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(_A)) + suffix_ones return prefix_ones + ([0] * len(_A)) + ([0] * len(_A)) + suffix_ones def snake_case__ ( self , _A , _A = None) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def snake_case__ ( self) -> Dict: """simple docstring""" _UpperCAmelCase : str = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.__dict__.copy() _UpperCAmelCase : Dict = None return state def __setstate__( self , _A) -> None: """simple docstring""" _UpperCAmelCase : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): _UpperCAmelCase : str = {} _UpperCAmelCase : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs) def snake_case__ ( self , _A , _A = None) -> Tuple[str]: """simple docstring""" _UpperCAmelCase : Tuple = Path(_A) if not save_dir.is_dir(): raise OSError(f'''{save_directory} should be a directory''') 
_UpperCAmelCase : Dict = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) _UpperCAmelCase : Any = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _A) if os.path.abspath(self.spm_file) != os.path.abspath(_A) and os.path.isfile(self.spm_file): copyfile(self.spm_file , _A) elif not os.path.isfile(self.spm_file): with open(_A , '''wb''') as fi: _UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_A) return (str(_A), str(_A)) def snake_case__ ( self , _A , _A = "en" , _A = None , _A = "ro" , **_A , ) -> BatchEncoding: """simple docstring""" _UpperCAmelCase : List[Any] = src_lang _UpperCAmelCase : int = tgt_lang self.set_src_lang_special_tokens(self.src_lang) return super().prepare_seqaseq_batch(_A , _A , **_A) def snake_case__ ( self , _A , _A , _A , **_A) -> List[Any]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''') _UpperCAmelCase : int = src_lang _UpperCAmelCase : Any = self(_A , add_special_tokens=_A , **_A) _UpperCAmelCase : Union[str, Any] = self.get_lang_id(_A) _UpperCAmelCase : List[Any] = tgt_lang_id return inputs def snake_case__ ( self) -> Any: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang) def snake_case__ ( self) -> List[str]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang) def snake_case__ ( self , _A) -> None: """simple docstring""" _UpperCAmelCase : Optional[int] = self.get_lang_token(_A) _UpperCAmelCase : List[Any] = self.lang_token_to_id[lang_token] _UpperCAmelCase : Tuple = [self.cur_lang_id] _UpperCAmelCase : List[str] = [self.eos_token_id] def snake_case__ ( self , _A) -> None: """simple docstring""" _UpperCAmelCase : Dict = self.get_lang_token(_A) _UpperCAmelCase : Optional[int] = self.lang_token_to_id[lang_token] _UpperCAmelCase : Tuple = [self.cur_lang_id] _UpperCAmelCase : Dict = [self.eos_token_id] def snake_case__ ( self , _A) -> str: """simple docstring""" return self.lang_code_to_token[lang] def snake_case__ ( self , _A) -> int: """simple docstring""" _UpperCAmelCase : str = self.get_lang_token(_A) return self.lang_token_to_id[lang_token] def _lowerCamelCase ( __A : str , __A : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: _UpperCAmelCase : List[Any] = sentencepiece.SentencePieceProcessor(**__A ) spm.Load(str(__A ) ) return spm def _lowerCamelCase ( __A : str ) -> Union[Dict, List]: with open(__A , '''r''' ) as f: return json.load(__A ) def _lowerCamelCase ( __A : Optional[Any] , __A : str ) -> None: with open(__A , '''w''' ) as f: json.dump(__A , __A , indent=2 )
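# A minimal, dependency-free sketch (not from the tokenizer above) of the id
# layout it relies on: language tokens such as "__en__" are appended *after* the
# base vocabulary, so their ids start at len(encoder), exactly as in the
# `self.encoder_size + i` mapping above. The tiny vocab and the three language
# codes here are illustrative assumptions.
base_encoder = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4}
language_codes = ["en", "fr", "de"]

lang_token_to_id = {
    f"__{code}__": len(base_encoder) + i for i, code in enumerate(language_codes)
}
id_to_lang_token = {v: k for k, v in lang_token_to_id.items()}


def token_to_id(token: str) -> int:
    # Language tokens take priority, mirroring the lookup order in the class above.
    if token in lang_token_to_id:
        return lang_token_to_id[token]
    return base_encoder.get(token, base_encoder["<unk>"])


assert token_to_id("__fr__") == 6  # 5 base entries, then en=5, fr=6, de=7
assert token_to_id("hello") == 4
assert token_to_id("missing") == base_encoder["<unk>"]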
485
def _lowerCamelCase(txt: str) -> list:
    # Return every variant of `txt` with exactly one alphabetic character upper-cased.
    # (The original signature named the parameter differently from the body, which
    # made the function raise NameError; the names are now consistent.)
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
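# Usage sketch (illustrative, not part of the original file): each alphabetic
# position yields exactly one variant with that single character upper-cased,
# and non-alphabetic characters contribute nothing.
assert _lowerCamelCase("a-b") == ["A-b", "a-B"]
assert _lowerCamelCase("42") == []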
485
1
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : Tuple = logging.get_logger(__name__) def snake_case ( snake_case : List[Any] , snake_case : Any ) -> Tuple: """simple docstring""" lowerCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def snake_case ( snake_case : Tuple , snake_case : Any ) -> Any: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) lowerCAmelCase = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' ) lowerCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] lowerCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] lowerCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def snake_case ( snake_case : Optional[int] , snake_case : List[Any] , snake_case : int ) -> List[str]: """simple docstring""" lowerCAmelCase = dct.pop(snake_case ) lowerCAmelCase = val def snake_case ( snake_case : str ) -> List[Any]: """simple docstring""" if "handwritten" in checkpoint_url: lowerCAmelCase = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in 
checkpoint_url or "stage1" in checkpoint_url: lowerCAmelCase = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('RGB' ) return im @torch.no_grad() def snake_case ( snake_case : int , snake_case : List[Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase = ViTConfig(image_size=384 , qkv_bias=snake_case ) lowerCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: lowerCAmelCase = 768 elif "large" in checkpoint_url: # use ViT-large encoder lowerCAmelCase = 1024 lowerCAmelCase = 4096 lowerCAmelCase = 24 lowerCAmelCase = 16 lowerCAmelCase = 1024 else: raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: lowerCAmelCase = False lowerCAmelCase = 'relu' lowerCAmelCase = 1024 lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False # load HuggingFace model lowerCAmelCase = ViTModel(snake_case , add_pooling_layer=snake_case ) lowerCAmelCase = TrOCRForCausalLM(snake_case ) lowerCAmelCase = VisionEncoderDecoderModel(encoder=snake_case , decoder=snake_case ) model.eval() # load state_dict of original model, rename some keys lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' , check_hash=snake_case )['model'] lowerCAmelCase = create_rename_keys(snake_case , snake_case ) for src, dest in rename_keys: rename_key(snake_case , snake_case , snake_case ) read_in_q_k_v(snake_case , snake_case ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): lowerCAmelCase = state_dict.pop(snake_case ) if key.startswith('decoder' ) and "output_projection" not in key: lowerCAmelCase = val else: lowerCAmelCase = val # load state dict model.load_state_dict(snake_case ) # Check outputs on an image lowerCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) lowerCAmelCase = RobertaTokenizer.from_pretrained('roberta-large' ) lowerCAmelCase = TrOCRProcessor(snake_case , snake_case ) lowerCAmelCase = processor(images=prepare_img(snake_case ) , return_tensors='pt' ).pixel_values # verify logits lowerCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) lowerCAmelCase = model(pixel_values=snake_case , decoder_input_ids=snake_case ) lowerCAmelCase = outputs.logits lowerCAmelCase = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: lowerCAmelCase = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: lowerCAmelCase = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: lowerCAmelCase = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: lowerCAmelCase = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == 
expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , snake_case , atol=1e-3 ), "First elements of logits not as expected" Path(snake_case ).mkdir(exist_ok=snake_case ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) print(F'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(snake_case ) if __name__ == "__main__": _UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) _UpperCamelCase : int = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
713
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _UpperCamelCase : List[Any] = "\\n\n" _UpperCamelCase : List[Any] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" _UpperCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": lowerCAmelCase = 'cuda' else: lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' lowerCAmelCase = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = model.to(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" lowerCAmelCase = model.config.max_length - 1 else: lowerCAmelCase = model.config.max_length lowerCAmelCase = tokenizer( _SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors='pt' , return_attention_mask=_SCREAMING_SNAKE_CASE , ).to(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = encodings['input_ids'] lowerCAmelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." lowerCAmelCase = [] lowerCAmelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ): lowerCAmelCase = min(start_index + batch_size , len(_SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = encoded_texts[start_index:end_index] lowerCAmelCase = attn_masks[start_index:end_index] if add_start_token: lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) lowerCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 ) lowerCAmelCase = encoded_batch with torch.no_grad(): lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ).logits lowerCAmelCase = out_logits[..., :-1, :].contiguous() lowerCAmelCase = labels[..., 1:].contiguous() lowerCAmelCase = attn_mask[..., 1:].contiguous() lowerCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_SCREAMING_SNAKE_CASE )}
514
0
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType snake_case__ : List[str] = get_logger(__name__) def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0): os.makedirs(__lowercase , exist_ok=__lowercase) with FSDP.state_dict_type( __lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config): UpperCamelCase_ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: UpperCamelCase_ = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" UpperCamelCase_ = os.path.join(__lowercase , __lowercase) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""") torch.save(__lowercase , __lowercase) logger.info(f"""Model saved to {output_model_file}""") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: UpperCamelCase_ = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) UpperCamelCase_ = os.path.join(__lowercase , __lowercase) logger.info(f"""Saving model to {output_model_file}""") torch.save(__lowercase , __lowercase) logger.info(f"""Model saved to {output_model_file}""") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: UpperCamelCase_ = os.path.join(__lowercase , f"""{MODEL_NAME}_{model_index}""") os.makedirs(__lowercase , exist_ok=__lowercase) logger.info(f"""Saving model to {ckpt_dir}""") UpperCamelCase_ = {'model': state_dict} dist_cp.save_state_dict( state_dict=__lowercase , storage_writer=dist_cp.FileSystemWriter(__lowercase) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""") def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0): accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__lowercase) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object') return UpperCamelCase_ = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" UpperCamelCase_ = os.path.join(__lowercase , __lowercase) logger.info(f"""Loading model from {input_model_file}""") UpperCamelCase_ = torch.load(__lowercase) logger.info(f"""Model loaded from {input_model_file}""") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: UpperCamelCase_ = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) UpperCamelCase_ = os.path.join(__lowercase , __lowercase) logger.info(f"""Loading model from 
{input_model_file}""") UpperCamelCase_ = torch.load(__lowercase) logger.info(f"""Model loaded from {input_model_file}""") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: UpperCamelCase_ = ( os.path.join(__lowercase , f"""{MODEL_NAME}_{model_index}""") if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""") UpperCamelCase_ = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__lowercase , storage_reader=dist_cp.FileSystemReader(__lowercase) , planner=DefaultLoadPlanner() , ) UpperCamelCase_ = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""") model.load_state_dict(__lowercase) def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0): os.makedirs(__lowercase , exist_ok=__lowercase) with FSDP.state_dict_type( __lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config): UpperCamelCase_ = FSDP.optim_state_dict(__lowercase , __lowercase) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: UpperCamelCase_ = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) UpperCamelCase_ = os.path.join(__lowercase , __lowercase) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""") torch.save(__lowercase , __lowercase) logger.info(f"""Optimizer state saved in {output_optimizer_file}""") else: UpperCamelCase_ = os.path.join(__lowercase , f"""{OPTIMIZER_NAME}_{optimizer_index}""") os.makedirs(__lowercase , exist_ok=__lowercase) logger.info(f"""Saving Optimizer state to {ckpt_dir}""") dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__lowercase) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""") def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0): accelerator.wait_for_everyone() with FSDP.state_dict_type( __lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: UpperCamelCase_ = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: UpperCamelCase_ = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) UpperCamelCase_ = os.path.join(__lowercase , __lowercase) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""") UpperCamelCase_ = torch.load(__lowercase) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""") else: UpperCamelCase_ = ( os.path.join(__lowercase , f"""{OPTIMIZER_NAME}_{optimizer_index}""") if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""") UpperCamelCase_ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__lowercase) , ) UpperCamelCase_ = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""") UpperCamelCase_ = FSDP.optim_state_dict_to_load(__lowercase , __lowercase , __lowercase) optimizer.load_state_dict(__lowercase)
23
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simple nearest-neighbour image resizing."""

    def __init__(self, img, dst_width: int, dst_height: int) -> None:
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self) -> None:
        # Map every destination pixel back to its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
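# An equivalent vectorized sketch: the same nearest-neighbour mapping as the
# class above, computed with NumPy fancy indexing instead of per-pixel loops.
import numpy as np


def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]


checker = np.zeros((2, 2, 3), np.uint8)
checker[0, 0] = 255
out = resize_nearest(checker, 4, 4)
assert out.shape == (4, 4, 3)
assert (out[0, 0] == 255).all() and (out[1, 1] == 255).all()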
146
0
'''simple docstring'''
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
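# A quick sanity sketch: the recursive sort above should agree with Python's
# built-in sorted() on a handful of small random inputs, duplicates and
# reversed runs included.
import random

for _ in range(5):
    data = [random.randint(-10, 10) for _ in range(random.randint(0, 8))]
    expected = sorted(data)
    rec_insertion_sort(data, len(data))
    assert data == expected, (data, expected)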
528
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[str]: UpperCAmelCase : int = tmp_path / '''cache''' UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : Any = SqlDatasetReader( '''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = tmp_path / '''cache''' UpperCAmelCase : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : str = features.copy() if features else default_expected_features UpperCAmelCase : Optional[Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Dict: with contextlib.closing(sqlitea.connect(_lowerCAmelCase ) ) as con: UpperCAmelCase : Any = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Optional[int]: UpperCAmelCase : Optional[int] = tmp_path / '''cache''' UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp.sql''' ) UpperCAmelCase : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read() SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write() UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase ) UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase ) for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ): assert rowa == rowa @require_sqlalchemy def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : 
Optional[Any] , _lowerCAmelCase : Union[str, Any] ) -> int: UpperCAmelCase : Optional[Any] = tmp_path / '''cache''' UpperCAmelCase : Any = os.path.join(_lowerCAmelCase , '''tmp.sql''' ) UpperCAmelCase : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read() SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write() UpperCAmelCase : List[str] = iter_sql_file(_lowerCAmelCase ) UpperCAmelCase : Any = iter_sql_file(_lowerCAmelCase ) for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ): assert rowa == rowa @require_sqlalchemy def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[Any]: UpperCAmelCase : Union[str, Any] = tmp_path / '''cache''' UpperCAmelCase : Tuple = os.path.join(_lowerCAmelCase , '''tmp.sql''' ) UpperCAmelCase : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read() with pytest.raises(_lowerCAmelCase ): SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
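# A dependency-light sketch of the round trip these tests exercise: write rows
# into a SQLite table, read them back, and compare, using only the stdlib
# sqlite3 module rather than the datasets SQL reader/writer.
import sqlite3

rows = [("a", 1, 1.5), ("b", 2, 2.5)]
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
read_back = list(con.execute("SELECT * FROM dataset"))
con.close()
assert read_back == rows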
528
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( _snake_case , unittest.TestCase ): lowercase = LDMTextToImagePipeline lowercase = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } lowercase = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } lowercase = TEXT_TO_IMAGE_BATCH_PARAMS lowercase = False def snake_case_ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) A_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) A_ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) A_ = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) A_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ = CLIPTextModel(UpperCamelCase__ ) A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A_ = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Union[str, Any]: '''simple docstring''' if str(UpperCamelCase__ ).startswith("""mps""" ): A_ = torch.manual_seed(UpperCamelCase__ ) else: A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) A_ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = LDMTextToImagePipeline(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) A_ = self.get_dummy_inputs(UpperCamelCase__ ) A_ = pipe(**UpperCamelCase__ ).images A_ = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) A_ = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class A__ ( unittest.TestCase ): def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self , UpperCamelCase__ , 
UpperCamelCase__=torch.floataa , UpperCamelCase__=0 ) -> List[str]: '''simple docstring''' A_ = torch.manual_seed(UpperCamelCase__ ) A_ = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) ) A_ = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) A_ = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) A_ = self.get_inputs(UpperCamelCase__ ) A_ = pipe(**UpperCamelCase__ ).images A_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) A_ = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) A_ = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class A__ ( unittest.TestCase ): def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=torch.floataa , UpperCamelCase__=0 ) -> Optional[int]: '''simple docstring''' A_ = torch.manual_seed(UpperCamelCase__ ) A_ = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) ) A_ = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) A_ = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def snake_case_ ( self ) -> List[Any]: '''simple docstring''' A_ = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) A_ = self.get_inputs(UpperCamelCase__ ) A_ = pipe(**UpperCamelCase__ ).images[0] A_ = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) A_ = np.abs(expected_image - image ).max() assert max_diff < 1e-3
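# A minimal sketch of the determinism pattern these tests lean on: a seeded,
# device-bound torch.Generator makes the initial latents (and therefore the
# pipeline output) reproducible, so a stored slice can be compared via
# np.abs(...).max() against a tolerance.
import numpy as np
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
latents_a = torch.randn(1, 4, 32, 32, generator=gen_a)
latents_b = torch.randn(1, 4, 32, 32, generator=gen_b)

max_diff = np.abs((latents_a - latents_b).numpy()).max()
assert max_diff < 1e-6  # identical seeds give identical latents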
288
'''simple docstring''' import os import sys import unittest __lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCamelCase = os.path.join(git_repo_path, '''src''', '''transformers''') __lowerCamelCase = ''' {0} = None ''' __lowerCamelCase = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' __lowerCamelCase = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class A__ ( unittest.TestCase ): def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" ) self.assertIsNone(UpperCamelCase__ ) A_ = find_backend(""" if not is_tokenizers_available():""" ) self.assertEqual(UpperCamelCase__ , """tokenizers""" ) A_ = find_backend(""" if not is_tensorflow_text_available():""" ) self.assertEqual(UpperCamelCase__ , """tensorflow_text""" ) A_ = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" ) self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""" ) A_ = find_backend( """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" ) self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""" ) A_ = find_backend( """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" ) self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""" ) def snake_case_ ( self ) -> int: '''simple docstring''' A_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , UpperCamelCase__ ) self.assertIn("""tensorflow_text""" , UpperCamelCase__ ) self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertModel""" , objects["""tf"""] ) self.assertIn("""FlaxBertModel""" , objects["""flax"""] ) self.assertIn("""BertModel""" , objects["""torch"""] ) self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] ) self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] ) def snake_case_ ( self ) -> str: '''simple docstring''' A_ = create_dummy_object("""CONSTANT""" , """'torch'""" ) self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""" ) A_ = create_dummy_object("""function""" , """'torch'""" ) self.assertEqual( UpperCamelCase__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) A_ = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ A_ = create_dummy_object("""FakeClass""" , """'torch'""" ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def snake_case_ ( self ) -> str: '''simple docstring''' A_ = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ A_ = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__ )
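# A standalone sketch of what a find_backend-style helper does: pull backend
# name(s) out of an `if not is_xxx_available():` guard line. The regexes here
# are assumptions for illustration, not the ones in check_dummies.
import re

_GUARD = re.compile(r"^\s*if not \(?(?P<body>.+?)\)?:\s*$")
_AVAIL = re.compile(r"is_(\w+)_available")


def find_backend_sketch(line: str):
    match = _GUARD.match(line)
    if match is None:
        return None
    backends = _AVAIL.findall(match.group("body"))
    return "_and_".join(backends) if backends else None


assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert (
    find_backend_sketch("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
    == "sentencepiece_and_tokenizers"
)
assert find_backend_sketch("x = 1") is None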
288
1
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _lowercase = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS) _lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING _lowercase = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { '''CLIPSegConfig''': True, 
'''DeformableDetrConfig''': True, '''DetaConfig''': True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] ): A = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F'config.{attribute}' in modeling_source or F'getattr(config, "{attribute}"' in modeling_source or F'getattr(self.config, "{attribute}"' in modeling_source ): A = True # Deal with multi-line cases elif ( re.search( rF'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , snake_case__ , ) is not None ): A = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: A = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files A = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] A = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed A = True if not attribute_used: A = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: A = True elif attribute in ["tie_word_embeddings"] and default_value is False: A = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: A = True elif attribute.endswith('_token_id' ): A = True # configuration class specific cases if not case_allowed: A = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) A = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def _snake_case ( snake_case__ : int ): A = dict(inspect.signature(config_class.__init__ ).parameters ) A = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] A = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass A = {} if len(config_class.attribute_map ) > 0: A = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files A = inspect.getsourcefile(snake_case__ ) A = os.path.dirname(snake_case__ ) # Let's check against all 
frameworks: as long as one framework uses an attribute, we are good. A = [os.path.join(snake_case__ , snake_case__ ) for fn in os.listdir(snake_case__ ) if fn.startswith('modeling_' )] # Get the source code strings A = [] for path in modeling_paths: if os.path.isfile(snake_case__ ): with open(snake_case__ ) as fp: modeling_sources.append(fp.read() ) A = [] for config_param, default_value in zip(snake_case__ , snake_case__ ): # `attributes` here is all the variant names for `config_param` A = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(snake_case__ , snake_case__ , snake_case__ , snake_case__ ): unused_attributes.append(attributes[0] ) return sorted(snake_case__ ) def _snake_case ( ): A = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) A = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda snake_case__ : inspect.isclass(snake_case__ ) and issubclass(snake_case__ , snake_case__ ) and inspect.getmodule(snake_case__ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: A = check_config_attributes_being_used(snake_case__ ) if len(snake_case__ ) > 0: A = unused_attributes if len(snake_case__ ) > 0: A = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += F'{name}: {attributes}\n' raise ValueError(snake_case__ ) if __name__ == "__main__": check_config_attributes()
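# A small standalone sketch of the introspection trick the checker above is
# built on: inspect.signature exposes an __init__'s parameter names and default
# values, which can then be searched for in the modeling source files. The
# DemoConfig class is a made-up stand-in.
import inspect


class DemoConfig:
    def __init__(self, hidden_size=32, num_layers=2, **kwargs):
        self.hidden_size = hidden_size
        self.num_layers = num_layers


params = dict(inspect.signature(DemoConfig.__init__).parameters)
names = [name for name in params if name not in ("self", "kwargs")]
defaults = [params[name].default for name in names]

assert names == ["hidden_size", "num_layers"]
assert defaults == [32, 2]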
718
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str: A = size if size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_normalize def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ImageGPTImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'clusters' ) ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A = self.image_processing_class(**self.image_processor_dict ) A = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(A_ ,'image_processor.json' ) image_processor_first.to_json_file(A_ ) A = self.image_processing_class.from_json_file(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(A_ ) A = 
self.image_processing_class.from_pretrained(A_ ).to_dict() A = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,A_ ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: pass def _snake_case ( ): A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) A = Image.open(dataset[4]['file'] ) A = Image.open(dataset[5]['file'] ) A = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) A = prepare_images() # test non-batched A = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) A = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ ) # test batched A = image_processing(A_ ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) A = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
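# A toy sketch of the clustering idea behind the ImageGPT inputs tested above:
# each (normalized) pixel is replaced by the index of its nearest color
# cluster, turning an image into a sequence of integer ids. The two clusters
# and the sample pixels here are made up.
import numpy as np

clusters = np.asarray([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])  # (n_clusters, 3)
pixels = np.asarray([[-0.9, -0.8, -1.0], [0.7, 1.0, 0.9]])    # (n_pixels, 3)

# Squared euclidean distance from every pixel to every cluster center.
dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
ids = dists.argmin(axis=1)

assert ids.tolist() == [0, 1]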
22
0
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( lowercase_ ,lowercase_ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = VQModel SCREAMING_SNAKE_CASE__ : Any = '''sample''' @property def __UpperCAmelCase ( self : Dict , snake_case : Optional[int]=(32, 32) ): """simple docstring""" _snake_case : Optional[int] = 4 _snake_case : List[str] = 3 _snake_case : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) return {"sample": image} @property def __UpperCAmelCase ( self : Tuple ): """simple docstring""" return (3, 32, 32) @property def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" return (3, 32, 32) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case : Optional[int] = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } _snake_case : Any = self.dummy_input return init_dict, inputs_dict def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" pass def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case , _snake_case : Dict = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(snake_case ) _snake_case : Any = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case : str = VQModel.from_pretrained('fusing/vqgan-dummy' ) model.to(snake_case ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _snake_case : Union[str, Any] = image.to(snake_case ) with torch.no_grad(): _snake_case : Tuple = model(snake_case ).sample _snake_case : List[str] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _snake_case : Tuple = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
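# A minimal sketch of the regression-test pattern used above: run a forward
# pass on a fixed input and compare a small output slice against hard-coded
# reference values with an absolute tolerance. The deterministic fake_forward
# stands in for VQModel's real forward pass.
import torch


def fake_forward(x: torch.Tensor) -> torch.Tensor:
    # Stand-in for a model forward pass: deterministic given the input.
    return x * 0.5


sample = torch.arange(12.0).reshape(1, 3, 2, 2)
output = fake_forward(sample)
output_slice = output[0, -1, -2:, -2:].flatten()

expected_slice = torch.tensor([4.0, 4.5, 5.0, 5.5])  # hard-coded reference values
assert torch.allclose(output_slice, expected_slice, atol=1e-3)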
517
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ : str SCREAMING_SNAKE_CASE__ : str = None @staticmethod def __UpperCAmelCase ( ): """simple docstring""" raise NotImplementedError def __UpperCAmelCase ( self : int , snake_case : Dict , snake_case : int , snake_case : str , **snake_case : Optional[int] ): """simple docstring""" raise NotImplementedError def __UpperCAmelCase ( self : str , snake_case : Dict ): """simple docstring""" raise NotImplementedError def __UpperCAmelCase ( self : Any ): """simple docstring""" if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def __UpperCAmelCase ( cls : Union[str, Any] ): """simple docstring""" return F"""`pip install {cls.pip_package or cls.name}`""" class SCREAMING_SNAKE_CASE ( lowercase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = '''optuna''' @staticmethod def __UpperCAmelCase ( ): """simple docstring""" return is_optuna_available() def __UpperCAmelCase ( self : Any , snake_case : Tuple , snake_case : int , snake_case : str , **snake_case : Optional[int] ): """simple docstring""" return run_hp_search_optuna(snake_case , snake_case , snake_case , **snake_case ) def __UpperCAmelCase ( self : Optional[Any] , snake_case : Dict ): """simple docstring""" return default_hp_space_optuna(snake_case ) class SCREAMING_SNAKE_CASE ( lowercase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = '''ray''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''\'ray[tune]\'''' @staticmethod def __UpperCAmelCase ( ): """simple docstring""" return is_ray_available() def __UpperCAmelCase ( self : List[str] , snake_case : Tuple , snake_case : int , snake_case : str , **snake_case : str ): """simple docstring""" return run_hp_search_ray(snake_case , snake_case , snake_case , **snake_case ) def __UpperCAmelCase ( self : int , snake_case : Optional[Any] ): """simple docstring""" return default_hp_space_ray(snake_case ) class SCREAMING_SNAKE_CASE ( lowercase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = '''sigopt''' @staticmethod def __UpperCAmelCase ( ): """simple docstring""" return is_sigopt_available() def __UpperCAmelCase ( self : int , snake_case : Optional[int] , snake_case : int , snake_case : str , **snake_case : Dict ): """simple docstring""" return run_hp_search_sigopt(snake_case , snake_case , snake_case , **snake_case ) def __UpperCAmelCase ( self : int , snake_case : List[Any] ): """simple docstring""" return default_hp_space_sigopt(snake_case ) class SCREAMING_SNAKE_CASE ( lowercase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = '''wandb''' @staticmethod def __UpperCAmelCase ( ): """simple docstring""" return is_wandb_available() def __UpperCAmelCase ( self : Dict , snake_case : List[str] , snake_case : int , snake_case : str , **snake_case : Optional[Any] ): """simple docstring""" return run_hp_search_wandb(snake_case , snake_case , snake_case , **snake_case ) def __UpperCAmelCase ( self : Union[str, Any] , snake_case : int ): 
"""simple docstring""" return default_hp_space_wandb(snake_case ) SCREAMING_SNAKE_CASE_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCamelCase__ ( ) -> str: """simple docstring""" _snake_case : Optional[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(a__) > 0: _snake_case : Any = available_backends[0].name if len(a__) > 1: logger.info( F"""{len(a__)} hyperparameter search backends available. Using {name} as the default.""") return name raise RuntimeError( 'No hyperparameter search backend available.\n' + '\n'.join( F""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
517
1
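The hyperparameter-search field in the row above is built around a small registry pattern: each backend class reports whether its library is importable, and the first available backend becomes the default. A minimal self-contained sketch of that pattern follows; the class and function names are illustrative, not the real transformers API.

class OptunaLikeBackend:
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        try:
            import optuna  # noqa: F401  # assumed optional dependency
            return True
        except ImportError:
            return False


# Registry keyed by backend name, mirroring the sample's dict comprehension.
ALL_BACKENDS = {backend.name: backend for backend in [OptunaLikeBackend()]}


def default_backend_name() -> str:
    available = [b.name for b in ALL_BACKENDS.values() if b.is_available()]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0]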
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
52
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random''' SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _a ( self : Optional[int] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) def _a ( self : int ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _a ( self : str ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _a ( self : str ): """simple docstring""" with self.assertRaises(_snake_case ): create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
52
1
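The biquad constructors in this row's `code` field return `IIRFilter` instances. A minimal usage sketch, assuming TheAlgorithms' `IIRFilter` exposes a per-sample `process` method (as its `audio_filters.iir_filter` module does); the signal values here are invented for illustration.

from math import sin, tau

# Hypothetical demo: attenuate a 4 kHz tone with a 1 kHz low-pass filter.
samplerate = 48_000
lowpass = make_lowpass(frequency=1_000, samplerate=samplerate)

tone = [sin(tau * 4_000 * n / samplerate) for n in range(1_000)]
filtered = [lowpass.process(sample) for sample in tone]
print(f"peak before: {max(tone):.3f}, peak after: {max(filtered):.3f}")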
'''Toy pad cipher: each character is combined with a fresh random key value.'''
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` into cipher values plus the per-character random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            encrypted = (i + k) * k
            cipher.append(encrypted)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt``: p = (c - k^2) / k recovers each code point."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
56
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if ( (cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f) or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) # or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) # or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) # or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) # or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f) or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) # ): # return True return False def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' for char in word: __UpperCamelCase :str = ord(SCREAMING_SNAKE_CASE ) if not _is_chinese_char(SCREAMING_SNAKE_CASE ): return 0 return 1 def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = set() for token in tokens: __UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE ) if chinese_word: word_set.add(SCREAMING_SNAKE_CASE ) __UpperCamelCase :int = list(SCREAMING_SNAKE_CASE ) return word_list def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' if not chinese_word_set: return bert_tokens __UpperCamelCase :Dict = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] ) __UpperCamelCase :str = bert_tokens __UpperCamelCase , __UpperCamelCase :int = 0, len(SCREAMING_SNAKE_CASE ) while start < end: __UpperCamelCase :Optional[int] = True if is_chinese(bert_word[start] ): __UpperCamelCase :Dict = min(end - start , SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ): __UpperCamelCase :int = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __UpperCamelCase :Union[str, Any] = '''##''' + bert_word[j] __UpperCamelCase :Dict = start + i __UpperCamelCase :Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = [] for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ): __UpperCamelCase :List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0] __UpperCamelCase :int = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res] ltp_res.extend(SCREAMING_SNAKE_CASE ) assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Any = [] for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ): __UpperCamelCase :List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[Any] = [] for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __UpperCamelCase :str = [] for id in input_ids: __UpperCamelCase :Dict = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE ) input_tokens.append(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(SCREAMING_SNAKE_CASE ): if token[:2] == "##": __UpperCamelCase :Union[str, Any] = token[2:] # save chinese tokens' pos if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ): ref_id.append(SCREAMING_SNAKE_CASE ) ref_ids.append(SCREAMING_SNAKE_CASE ) assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) return ref_ids def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: __UpperCamelCase :Any = f.readlines() __UpperCamelCase :str = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __UpperCamelCase :Optional[Any] = LTP(args.ltp ) # faster in GPU device __UpperCamelCase :Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) __UpperCamelCase :Any = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: __UpperCamelCase :Optional[Any] = [json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' for ref in ref_ids] f.writelines(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') __lowercase = parser.parse_args() main(args)
167
0
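A quick round-trip check for the pad cipher above (a sketch; the key is drawn fresh per character, so ciphertexts differ between runs while decryption stays exact):

message = "attack at dawn"
cipher, key = Onepad().encrypt(message)
assert Onepad().decrypt(cipher, key) == message  # always recovers the input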
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests

a__ = open  # noqa: we just need to have a builtin inside this module to test it properly
710
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via merge sort; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (p[k], q[j]) with p[k] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k < len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
235
0
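Since the two inversion counters above compute the same function at O(n^2) and O(n log n) cost, a randomized cross-check is a cheap way to gain confidence in the merge-based version (a sketch):

import random

arr = [random.randint(0, 100) for _ in range(500)]
_, fast_count = count_inversions_recursive(arr)
assert count_inversions_bf(arr) == fast_count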
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any]=100 , snake_case_ : int=13 , snake_case_ : List[str]=30 , snake_case_ : Dict=2 , snake_case_ : int=3 , snake_case_ : List[str]=True , snake_case_ : str=True , snake_case_ : str=32 , snake_case_ : Optional[int]=4 , snake_case_ : int=4 , snake_case_ : List[str]=37 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : str=10 , snake_case_ : Dict=0.02 , snake_case_ : Optional[int]=3 , snake_case_ : List[str]=None , snake_case_ : Any=[0, 1, 2, 3] , ): """simple docstring""" A : Union[str, Any] = parent A : Optional[int] = 100 A : Optional[Any] = batch_size A : Optional[Any] = image_size A : List[str] = patch_size A : str = num_channels A : str = is_training A : List[Any] = use_labels A : Tuple = hidden_size A : int = num_hidden_layers A : int = num_attention_heads A : int = intermediate_size A : Optional[Any] = hidden_act A : List[Any] = hidden_dropout_prob A : Dict = attention_probs_dropout_prob A : str = type_sequence_label_size A : int = initializer_range A : List[Any] = scope A : str = out_indices A : Tuple = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A : Tuple = (image_size // patch_size) ** 2 A : Tuple = num_patches + 1 def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None A : Optional[Any] = None if self.use_labels: A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A : str = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCAmelCase ( self : Any ): """simple docstring""" return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCAmelCase ( self : List[Any] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : int , 
snake_case_ : Optional[Any] ): """simple docstring""" A : int = BeitModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() A : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Optional[Any] ): """simple docstring""" A : Tuple = BeitForMaskedImageModeling(config=snake_case_ ) model.to(snake_case_ ) model.eval() A : str = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCAmelCase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ): """simple docstring""" A : str = self.type_sequence_label_size A : str = BeitForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() A : int = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : int = 1 A : Tuple = BeitForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() A : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : List[str] = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCAmelCase ( self : int , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[str] ): """simple docstring""" A : str = self.num_labels A : List[Any] = BeitForSemanticSegmentation(snake_case_ ) model.to(snake_case_ ) model.eval() A : Tuple = model(snake_case_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) A : Optional[int] = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCAmelCase ( self : Tuple ): """simple docstring""" A : List[Any] = self.prepare_config_and_inputs() A , A , A , A : Optional[int] = config_and_inputs A : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( snake_case, snake_case, unittest.TestCase ): lowerCamelCase_ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def _UpperCAmelCase ( self : Dict ): """simple docstring""" A : Any = BeitModelTester(self ) A : Tuple = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 ) def _UpperCAmelCase ( self : str ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _UpperCAmelCase ( self : int ): """simple docstring""" pass def 
_UpperCAmelCase ( self : List[str] ): """simple docstring""" A , A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : int = model_class(snake_case_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) ) def _UpperCAmelCase ( self : str ): """simple docstring""" A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(snake_case_ ) A : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , snake_case_ ) def _UpperCAmelCase ( self : int ): """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case_ ) def _UpperCAmelCase ( self : str ): """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) def _UpperCAmelCase ( self : int ): """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case_ ) def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" if not self.model_tester.is_training: return A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A : Union[str, Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case_ ), BeitForMaskedImageModeling]: continue A : Optional[Any] = model_class(snake_case_ ) model.to(snake_case_ ) model.train() A : Union[str, Any] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) A : Union[str, Any] = model(**snake_case_ ).loss loss.backward() def _UpperCAmelCase ( self : Tuple ): """simple docstring""" A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : List[str] = False A : List[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case_ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue A : Optional[int] = model_class(snake_case_ ) model.gradient_checkpointing_enable() model.to(snake_case_ ) model.train() A : List[str] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) A : int = model(**snake_case_ ).loss loss.backward() def _UpperCAmelCase ( self : Any ): """simple docstring""" A , A : str = self.model_tester.prepare_config_and_inputs_for_common() A : List[str] = _config_zero_init(snake_case_ ) for model_class in self.all_model_classes: A : List[str] = model_class(config=snake_case_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 
1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def _UpperCAmelCase ( self : Tuple ): """simple docstring""" for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Any = BeitModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def _lowerCamelCase ( ): '''simple docstring''' A : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self : Tuple ): """simple docstring""" return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" A : Optional[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(snake_case_ ) A : Any = self.default_image_processor A : List[str] = prepare_img() A : int = image_processor(images=snake_case_ , return_tensors='''pt''' ).pixel_values.to(snake_case_ ) # prepare bool_masked_pos A : List[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(snake_case_ ) # forward pass with torch.no_grad(): A : Union[str, Any] = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ ) A : List[str] = outputs.logits # verify the logits A : Optional[int] = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , snake_case_ ) A : Any = torch.tensor( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(snake_case_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) ) @slow def _UpperCAmelCase ( self : str ): """simple docstring""" A : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(snake_case_ ) A : Any = self.default_image_processor A : int = prepare_img() A : List[str] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): A : Dict = model(**snake_case_ ) A : Optional[int] = outputs.logits # verify the logits A : Any = torch.Size((1, 1000) ) self.assertEqual(logits.shape , snake_case_ ) A : Dict = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(snake_case_ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) ) A : Dict = 281 self.assertEqual(logits.argmax(-1 ).item() , snake_case_ ) @slow def _UpperCAmelCase ( self : List[str] ): """simple docstring""" A : Optional[int] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( snake_case_ ) A : Any = self.default_image_processor A : Dict = prepare_img() A : Dict = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): A : Tuple = model(**snake_case_ ) A : int = outputs.logits # verify the logits A : Optional[int] = torch.Size((1, 2_1841) ) self.assertEqual(logits.shape , snake_case_ ) A : Optional[int] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(snake_case_ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) ) A : Any = 2396 self.assertEqual(logits.argmax(-1 ).item() , snake_case_ ) @slow def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" A : str = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) A : List[Any] = model.to(snake_case_ ) A : List[str] = 
BeitImageProcessor(do_resize=snake_case_ , size=640 , do_center_crop=snake_case_ ) A : List[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) A : Optional[Any] = Image.open(ds[0]['''file'''] ) A : List[str] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): A : List[Any] = model(**snake_case_ ) A : int = outputs.logits # verify the logits A : Optional[int] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , snake_case_ ) A : Union[str, Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: A : List[Any] = torch.tensor( [ [[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]], [[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]], [[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]], ] , device=snake_case_ , ) else: A : Union[str, Any] = torch.tensor( [ [[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]], [[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]], [[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]], ] , device=snake_case_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case_ , atol=1E-4 ) ) @slow def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" A : List[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) A : List[Any] = model.to(snake_case_ ) A : Optional[Any] = BeitImageProcessor(do_resize=snake_case_ , size=640 , do_center_crop=snake_case_ ) A : List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) A : Any = Image.open(ds[0]['''file'''] ) A : List[str] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): A : str = model(**snake_case_ ) A : Any = outputs.logits.detach().cpu() A : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case_ , target_sizes=[(500, 300)] ) A : Optional[Any] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , snake_case_ ) A : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=snake_case_ ) A : str = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , snake_case_ )
256
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    """Return an RSA ((n, e), (n, d)) key pair built from two large primes."""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    """Write the generated key pair to ``{name}_pubkey.txt`` / ``{name}_privkey.txt``."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
256
1
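The RSA sample above only writes the generated keys to disk; textbook encryption with the resulting (n, e)/(n, d) pair is plain modular exponentiation, sketched below (assuming `generate_key` and its `rabin_miller`/`cryptomath_module` helpers are importable; real deployments need padding such as OAEP):

public_key, private_key = generate_key(1024)
n, e = public_key
_, d = private_key

message = 42  # must be an integer smaller than n
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message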
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ): if attention_mask is None: _snake_case : Any = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _snake_case : List[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _snake_case : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ ) if decoder_head_mask is None: _snake_case : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) if cross_attn_head_mask is None: _snake_case : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class _a: def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=9_9 , __snake_case=1_6 , __snake_case=2 , __snake_case=4 , __snake_case=4 , __snake_case="relu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=2_0 , __snake_case=2 , __snake_case=1 , __snake_case=0 , ) -> Any: '''simple docstring''' _snake_case : Optional[Any] = parent _snake_case : List[str] = batch_size _snake_case : Union[str, Any] = seq_length _snake_case : Optional[Any] = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : Union[str, Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Tuple = intermediate_size _snake_case : str = hidden_act _snake_case : Optional[Any] = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : int = encoder_layerdrop _snake_case : Tuple = decoder_layerdrop _snake_case : List[str] = max_position_embeddings _snake_case : Tuple = eos_token_id _snake_case : Dict = pad_token_id _snake_case : str = bos_token_id def lowercase ( self ) -> str: '''simple docstring''' _snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Union[str, Any] = self.eos_token_id # Eos Token _snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing 
if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _snake_case : int = input_ids.clamp(self.pad_token_id + 1 ) _snake_case : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) _snake_case : Union[str, Any] = self.get_config() _snake_case : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A ) return config, inputs_dict def lowercase ( self ) -> Any: '''simple docstring''' return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowercase ( self ) -> Optional[Any]: '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self , __snake_case , __snake_case ) -> Optional[Any]: '''simple docstring''' _snake_case : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval() _snake_case : List[Any] = inputs_dict["input_ids"] _snake_case : Optional[Any] = inputs_dict["attention_mask"] _snake_case : Union[str, Any] = inputs_dict["head_mask"] # first forward pass _snake_case : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A ) _snake_case : Dict = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _snake_case : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : List[str] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _snake_case : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _snake_case : Tuple = model(__A , attention_mask=__A )["last_hidden_state"] _snake_case : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[ "last_hidden_state" ] # select random slice _snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _snake_case : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _snake_case : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-2 ) ) def lowercase ( self , __snake_case , __snake_case ) -> Any: '''simple docstring''' _snake_case : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval() _snake_case : Union[str, Any] = model(**__A ) _snake_case : Tuple = outputs.encoder_last_hidden_state _snake_case : Union[str, Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _snake_case : Dict = model.get_encoder() encoder.save_pretrained(__A ) _snake_case : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A ) _snake_case : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - 
encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case : Dict = model.get_decoder() decoder.save_pretrained(__A ) _snake_case : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A ) _snake_case : List[str] = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class _a( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowerCamelCase__ :List[str] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowerCamelCase__ :Any = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowerCamelCase__ :Tuple = ( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowerCamelCase__ :Optional[int] = True lowerCamelCase__ :str = True lowerCamelCase__ :str = False lowerCamelCase__ :Union[str, Any] = False def lowercase ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def lowercase ( self ) -> int: '''simple docstring''' _snake_case : Any = MaMaaaModelTester(self ) _snake_case : Dict = ConfigTester(self , config_class=__A ) def lowercase ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def lowercase ( self ) -> Union[str, Any]: '''simple docstring''' _snake_case : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _snake_case : int = model_class(__A ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A ) _snake_case : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self ) -> Tuple: '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A ) def lowercase ( self ) -> Optional[Any]: '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__A ) def lowercase ( self ) -> Tuple: '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): _snake_case : str = model_class(__A ) model.to(__A ) model.eval() _snake_case : str = copy.deepcopy(self._prepare_for_class(__A , __A ) ) if not self.is_encoder_decoder: _snake_case : Optional[Any] = inputs["input_ids"] del inputs["input_ids"] else: _snake_case : Union[str, Any] = inputs["input_ids"] _snake_case : List[str] = inputs.get("decoder_input_ids" , __A ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , __A ) _snake_case : Tuple = model.get_input_embeddings() if not self.is_encoder_decoder: _snake_case : List[Any] = wte(__A ) else: 
_snake_case : Any = wte(__A ) _snake_case : Optional[int] = wte(__A ) with torch.no_grad(): model(**__A )[0] def lowercase ( self ) -> Any: '''simple docstring''' _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() _snake_case : Any = input_dict["input_ids"] _snake_case : int = input_ids.ne(1 ).to(__A ) _snake_case : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A ) if torch_device == "cuda": model.half() model.generate(__A , attention_mask=__A ) model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 ) def A ( UpperCAmelCase ): return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ) __lowerCAmelCase :Optional[Any] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class _a( unittest.TestCase ): @cached_property def lowercase ( self ) -> List[str]: '''simple docstring''' return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def lowercase ( self ) -> List[str]: '''simple docstring''' _snake_case : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A ) _snake_case : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) _snake_case : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) _snake_case : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): _snake_case : str = model(**__A )[0] _snake_case : Tuple = torch.Size((1, 1_1, 1_0_2_4) ) self.assertEqual(output.shape , __A ) # change to expected output here _snake_case : Optional[Any] = torch.tensor( [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def lowercase ( self ) -> Any: '''simple docstring''' _snake_case : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A ) # change to intended input _snake_case : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) _snake_case : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) _snake_case : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): _snake_case : Union[str, Any] = model(**__A )[0] _snake_case : Tuple = torch.Size((1, 1_1, model.config.vocab_size) ) self.assertEqual(output.shape , __A ) # change to expected output here _snake_case : List[str] = torch.tensor( [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def lowercase ( self ) -> Any: '''simple docstring''' _snake_case : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A ) _snake_case : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) _snake_case : List[Any] = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des 
communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams _snake_case : str = tokenizer(__A , padding=__A , return_tensors="pt" ) _snake_case : Tuple = model.generate( input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) _snake_case : List[str] = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] _snake_case : Dict = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A ) assert generated == expected_en
703
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ): _snake_case : Any = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] _snake_case : Tuple = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] _snake_case : str = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] _snake_case : List[Any] = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ): if split_mlp_wi: _snake_case : List[Any] = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] _snake_case : Tuple = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] _snake_case : Union[str, Any] = (wi_a, wi_a) else: _snake_case : Optional[int] = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""] _snake_case : List[str] = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""] def A ( UpperCAmelCase , *, UpperCAmelCase , UpperCAmelCase ): _snake_case : Any = traverse_util.flatten_dict(variables["target"] ) _snake_case : str = {"/".join(UpperCAmelCase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _snake_case : Tuple = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:" , UpperCAmelCase ) _snake_case : List[Any] = collections.OrderedDict() # Shared embeddings. _snake_case : Tuple = old["token_embedder/embedding"] # Encoder. for i in range(UpperCAmelCase ): # Block i, layer 0 (Self Attention). _snake_case : Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_attention_layer_norm" ) _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "attention" ) _snake_case : Optional[Any] = layer_norm _snake_case : int = k.T _snake_case : Optional[int] = o.T _snake_case : Dict = q.T _snake_case : Any = v.T # Block i, layer 1 (MLP). _snake_case : Any = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_mlp_layer_norm" ) _snake_case , _snake_case : Tuple = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , UpperCAmelCase ) _snake_case : Any = layer_norm if split_mlp_wi: _snake_case : Union[str, Any] = wi[0].T _snake_case : List[str] = wi[1].T else: _snake_case : List[Any] = wi.T _snake_case : Dict = wo.T _snake_case : Dict = old[ "encoder/relpos_bias/rel_embedding" ].T _snake_case : Dict = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase ): # Block i, layer 0 (Self Attention). _snake_case : str = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_self_attention_layer_norm" ) _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "self_attention" ) _snake_case : Optional[Any] = layer_norm _snake_case : Dict = k.T _snake_case : List[str] = o.T _snake_case : List[Any] = q.T _snake_case : List[Any] = v.T # Block i, layer 1 (Cross Attention). 
_snake_case : Tuple = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_cross_attention_layer_norm" ) _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "encoder_decoder_attention" ) _snake_case : Tuple = layer_norm _snake_case : Union[str, Any] = k.T _snake_case : str = o.T _snake_case : List[str] = q.T _snake_case : List[Any] = v.T # Block i, layer 2 (MLP). _snake_case : Dict = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_mlp_layer_norm" ) _snake_case , _snake_case : Union[str, Any] = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , UpperCAmelCase ) _snake_case : Optional[Any] = layer_norm if split_mlp_wi: _snake_case : str = wi[0].T _snake_case : Union[str, Any] = wi[1].T else: _snake_case : Optional[Any] = wi.T _snake_case : int = wo.T _snake_case : Optional[int] = old["decoder/decoder_norm/scale"] _snake_case : int = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _snake_case : Tuple = old["decoder/logits_dense/kernel"].T return new def A ( UpperCAmelCase , UpperCAmelCase ): _snake_case : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _snake_case : Tuple = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _snake_case : List[Any] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." ) _snake_case : Optional[int] = state_dict["shared.weight"] return state_dict def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case : List[str] = checkpoints.load_tax_checkpoint(UpperCAmelCase ) _snake_case : Dict = convert_tax_to_pytorch(UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase ) _snake_case : Dict = make_state_dict(UpperCAmelCase , UpperCAmelCase ) model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ): _snake_case : str = TaConfig.from_json_file(UpperCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _snake_case : List[Any] = TaEncoderModel(UpperCAmelCase ) else: _snake_case : List[str] = TaForConditionalGeneration(UpperCAmelCase ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase ) # Verify that we can load the checkpoint. model.from_pretrained(UpperCAmelCase ) print("Done" ) if __name__ == "__main__": __lowerCAmelCase :Any = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is an encoder-only model.', default=False ) __lowerCAmelCase :Optional[int] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
278
0
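The converter above transposes every attention and MLP kernel with `.T` before storing it. A minimal sketch of why, assuming nothing beyond NumPy and PyTorch (names are illustrative): Flax/T5X keeps a dense kernel as (in_features, out_features), while torch.nn.Linear.weight expects (out_features, in_features), so a single transpose makes both modules compute the same product.

import numpy as np
import torch

in_features, out_features = 4, 3
flax_kernel = np.random.rand(in_features, out_features).astype(np.float32)

# torch.nn.Linear stores weight as (out_features, in_features), hence the .T
linear = torch.nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(flax_kernel.T))

x = torch.ones(1, in_features)
expected = torch.from_numpy(np.ones((1, in_features), dtype=np.float32) @ flax_kernel)
assert torch.allclose(linear(x), expected, atol=1e-6)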
import sys def UpperCamelCase_( snake_case__: Dict ) -> Union[str, Any]: UpperCAmelCase__ = len(__UpperCamelCase ) UpperCAmelCase__ = [[0 for x in range(__UpperCamelCase )] for x in range(__UpperCamelCase )] UpperCAmelCase__ = [[0 for x in range(__UpperCamelCase )] for x in range(__UpperCamelCase )] for chain_length in range(2 , __UpperCamelCase ): for a in range(1 , n - chain_length + 1 ): UpperCAmelCase__ = a + chain_length - 1 UpperCAmelCase__ = sys.maxsize for c in range(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: UpperCAmelCase__ = cost UpperCAmelCase__ = c return matrix, sol def UpperCamelCase_( snake_case__: int , snake_case__: Any , snake_case__: str ) -> List[Any]: if i == j: print('A' + str(__UpperCamelCase ) , end=' ' ) else: print('(' , end=' ' ) print_optimal_solution(__UpperCamelCase , __UpperCamelCase , optimal_solution[i][j] ) print_optimal_solution(__UpperCamelCase , optimal_solution[i][j] + 1 , __UpperCamelCase ) print(')' , end=' ' ) def UpperCamelCase_( ) -> Optional[Any]: UpperCAmelCase__ = [30, 35, 15, 5, 10, 20, 25] UpperCAmelCase__ = len(__UpperCamelCase ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 UpperCAmelCase__ = matrix_chain_order(__UpperCamelCase ) print('No. of operations required: ' + str(matrix[1][n - 1] ) ) print_optimal_solution(__UpperCamelCase , 1 , n - 1 ) if __name__ == "__main__": main()
146
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase ) -> str: """simple docstring""" return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] ) def __lowerCamelCase ( __UpperCamelCase ) -> bytes: """simple docstring""" if (len(__UpperCamelCase ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(__UpperCamelCase ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
610
0
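The table-filling loop in the matrix-chain snippet above implements the standard recurrence cost(i, j) = min over k of cost(i, k) + cost(k+1, j) + p[i-1]*p[k]*p[j]. As a cross-check, the same recurrence written top-down with memoization (names here are illustrative, not from the snippet); the dimension list is the classic textbook example whose optimum is 15125 multiplications.

from functools import lru_cache

p = [30, 35, 15, 5, 10, 20, 25]  # same dims as main() above; optimum is 15125

@lru_cache(maxsize=None)
def cost(i: int, j: int) -> int:
    # minimal scalar multiplications to compute the product A_i ... A_j
    if i == j:
        return 0
    return min(cost(i, k) + cost(k + 1, j) + p[i - 1] * p[k] * p[j]
               for k in range(i, j))

assert cost(1, len(p) - 1) == 15125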
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = '''mvp''' __UpperCamelCase = ['''past_key_values'''] __UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , _a=50_267 , _a=1_024 , _a=12 , _a=4_096 , _a=16 , _a=12 , _a=4_096 , _a=16 , _a=0.0 , _a=0.0 , _a="gelu" , _a=1_024 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.0 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , _a=True , _a=2 , _a=2 , _a=False , _a=100 , _a=800 , **_a , ): """simple docstring""" lowerCamelCase = vocab_size lowerCamelCase = max_position_embeddings lowerCamelCase = d_model lowerCamelCase = encoder_ffn_dim lowerCamelCase = encoder_layers lowerCamelCase = encoder_attention_heads lowerCamelCase = decoder_ffn_dim lowerCamelCase = decoder_layers lowerCamelCase = decoder_attention_heads lowerCamelCase = dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = activation_function lowerCamelCase = init_std lowerCamelCase = encoder_layerdrop lowerCamelCase = decoder_layerdrop lowerCamelCase = classifier_dropout lowerCamelCase = use_cache lowerCamelCase = encoder_layers lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase = use_prompt lowerCamelCase = prompt_length lowerCamelCase = prompt_mid_dim super().__init__( pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , is_encoder_decoder=A__ , decoder_start_token_id=A__ , forced_eos_token_id=A__ , **A__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , A__ ): lowerCamelCase = self.bos_token_id warnings.warn( f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' """The config can simply be saved and uploaded again to be fixed.""" )
720
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @property def _lowerCAmelCase ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.dummy_uncond_unet lowerCamelCase = ScoreSdeVeScheduler() lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a ).images lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a , return_dict=_a )[ 0 ] lowerCamelCase = image[0, -3:, -3:, -1] lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = """google/ncsnpp-church-256""" lowerCamelCase = UNetaDModel.from_pretrained(_a ) lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(_a ) lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a ) sde_ve.to(_a ) sde_ve.set_progress_bar_config(disable=_a ) lowerCamelCase = torch.manual_seed(0 ) lowerCamelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_a ).images lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
533
0
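The MVP config above exposes `hidden_size` as an alias for `d_model` through its attribute map. A minimal sketch of that aliasing idea, not the actual transformers implementation; class and field names below are made up.

class TinyConfig:
    # illustrative subset of the attribute map used above
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=1024, encoder_attention_heads=16):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = TinyConfig()
assert cfg.hidden_size == cfg.d_model == 1024
assert cfg.num_attention_heads == 16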
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : List[str] = True lowerCamelCase__ : Optional[Any] = False if __name__ == "__main__": lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') lowerCamelCase__ : Optional[Any] = parser.parse_args() lowerCamelCase__ : List[Any] = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } lowerCamelCase__ : str = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } lowerCamelCase__ : str = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: lowerCamelCase__ : Optional[int] = reader.read() lowerCamelCase__ : Optional[int] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): lowerCamelCase__ : Any = UNetaDModel(**config) else: lowerCamelCase__ : Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel lowerCamelCase__ : Optional[Any] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) lowerCamelCase__ : Optional[Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: lowerCamelCase__ : List[str] = config[key] del config[key] lowerCamelCase__ : str = [k.replace('UNetRes', '') for k in config['down_block_types']] lowerCamelCase__ : List[str] = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: lowerCamelCase__ : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) lowerCamelCase__ : Dict = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue lowerCamelCase__ : int = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: lowerCamelCase__ : Dict = param_value lowerCamelCase__ : str = True if not has_changed: lowerCamelCase__ : Dict = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
31
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]: # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('''<mask>''') == 1 __a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1 __a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple __a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __a : Any = logits[0, masked_index, :] __a : Any = logits.softmax(dim=0) __a , __a : Optional[Any] = prob.topk(k=a_ , dim=0) __a : Optional[int] = ''' '''.join( [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))]) __a : List[str] = tokenizer.mask_token __a : Optional[int] = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')): __a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''') if " {0}".format(a_) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(a_) , a_), values[index].item(), predicted_token, )) else: topk_filled_outputs.append( ( masked_input.replace(a_ , a_), values[index].item(), predicted_token, )) return topk_filled_outputs A = CamembertTokenizer.from_pretrained('''camembert-base''') A = CamembertForMaskedLM.from_pretrained('''camembert-base''') model.eval() A = '''Le camembert est <mask> :)''' print(fill_mask(masked_input, model, tokenizer, topk=3))
52
0
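The UNet conversion script above renames state-dict keys by comparing only the first dotted segment (param_key.split('.')[0] == key), so nested parameter names keep their tails. A self-contained sketch of that renaming pass, with illustrative key names:

key_map = {"downsample_blocks": "down_blocks", "mid": "mid_block"}
state_dict = {
    "downsample_blocks.0.conv.weight": 1,
    "mid.attn.bias": 2,
    "time_embedding.weight": 3,  # no mapping: kept verbatim
}

renamed = {}
for key, value in state_dict.items():
    head, _, tail = key.partition(".")
    new_head = key_map.get(head, head)
    renamed[(new_head + "." + tail) if tail else new_head] = value

assert "down_blocks.0.conv.weight" in renamed
assert "mid_block.attn.bias" in renamed
assert "time_embedding.weight" in renamed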
def UpperCAmelCase__ ( UpperCAmelCase__ :int = 10_00 ): '''simple docstring''' a , a = 1, 1 a = 2 while True: a = 0 a = fa + fa a , a = fa, f index += 1 for _ in str(UpperCAmelCase__ ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
32
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
1
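solution() above walks the Fibonacci sequence until the digit count reaches n. A compact sanity check of the same logic, restructured slightly as a while loop; the index starts at 2 to mirror the snippet's F(1) = F(2) = 1 seed.

def first_fib_index_with_digits(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_digits(2) == 7   # F(7) = 13
assert first_fib_index_with_digits(3) == 12  # F(12) = 144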
'''simple docstring''' def lowercase__ ( __UpperCamelCase : int , __UpperCamelCase : int ): '''simple docstring''' return "\n".join( F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
566
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case : Tuple = logging.get_logger(__name__) snake_case : List[Any] = '▁' snake_case : Tuple = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } snake_case : Optional[Any] = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } snake_case : str = { 'facebook/s2t-small-librispeech-asr': 1_024, } snake_case : Optional[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] snake_case : Union[str, Any] = {'mustc': MUSTC_LANGS} class lowerCamelCase__( snake_case_ ): UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : List[Any] = MAX_MODEL_INPUT_SIZES UpperCamelCase : List[str] = ["input_ids", "attention_mask"] UpperCamelCase : List[int] = [] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = None , **__UpperCAmelCase , ): """simple docstring""" __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_upper_case=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , lang_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) __lowercase = do_upper_case __lowercase = do_lower_case __lowercase = load_json(__UpperCAmelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = spm_file __lowercase = load_spm(__UpperCAmelCase , self.sp_model_kwargs ) if lang_codes is not None: __lowercase = lang_codes __lowercase = LANGUAGES[lang_codes] __lowercase = [F'''<lang:{lang}>''' for lang in self.langs] __lowercase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs} __lowercase = self.lang_tokens __lowercase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __lowercase = {} @property def __magic_name__ ( self ): """simple docstring""" return len(self.encoder ) @property def __magic_name__ ( self ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = new_tgt_lang self.set_tgt_lang_special_tokens(__UpperCAmelCase ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = self.lang_code_to_id[tgt_lang] __lowercase = [lang_code_id] def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" return self.encoder.get(__UpperCAmelCase , self.encoder[self.unk_token] ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" return self.decoder.get(__UpperCAmelCase , self.unk_token ) def __magic_name__ ( 
self , __UpperCAmelCase ): """simple docstring""" __lowercase = [] __lowercase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __lowercase = self.sp_model.decode(__UpperCAmelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " __lowercase = [] else: current_sub_tokens.append(__UpperCAmelCase ) __lowercase = self.sp_model.decode(__UpperCAmelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) __lowercase = [1] * len(self.prefix_tokens ) __lowercase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones def __magic_name__ ( self ): """simple docstring""" __lowercase = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __lowercase = self.__dict__.copy() __lowercase = None return state def __setstate__( self , __UpperCAmelCase ): """simple docstring""" __lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __lowercase = {} __lowercase = load_spm(self.spm_file , self.sp_model_kwargs ) def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" __lowercase = Path(__UpperCAmelCase ) assert save_dir.is_dir(), F'''{save_directory} should be a directory''' __lowercase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) __lowercase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__UpperCAmelCase , """wb""" ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (str(__UpperCAmelCase ), str(__UpperCAmelCase )) def lowercase__ ( __UpperCamelCase : str , __UpperCamelCase : Dict[str, Any] ): '''simple docstring''' __lowercase = sentencepiece.SentencePieceProcessor(**__UpperCamelCase ) spm.Load(str(__UpperCamelCase ) ) return spm def lowercase__ ( __UpperCamelCase : str ): '''simple docstring''' with open(__UpperCamelCase , """r""" ) as f: return json.load(__UpperCamelCase ) def lowercase__ ( __UpperCamelCase : str , __UpperCamelCase : str ): '''simple docstring''' with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=2 )
566
1
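The Speech2Text tokenizer file above ends with small load_spm / load_json / save_json helpers. A tiny round-trip of the JSON pair, assuming only the standard library and a temporary directory:

import json
import os
import tempfile

def save_json(data, path):
    with open(path, "w") as f:
        json.dump(data, f, indent=2)

def load_json(path):
    with open(path, "r") as f:
        return json.load(f)

vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.json")
    save_json(vocab, path)
    assert load_json(path) == vocab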
'''simple docstring''' from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets snake_case_ : str = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n' snake_case_ : int = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n' snake_case_ : int = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def __snake_case ( _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Optional[int]): return float((preds == labels).mean()) def __snake_case ( _UpperCAmelCase : List[str], _UpperCAmelCase : Optional[Any]): UpperCamelCase = simple_accuracy(_UpperCAmelCase, _UpperCAmelCase) UpperCamelCase = float(fa_score(y_true=_UpperCAmelCase, y_pred=_UpperCAmelCase)) return { "accuracy": acc, "f1": fa, } def __snake_case ( _UpperCAmelCase : Optional[Any], _UpperCAmelCase : str): UpperCamelCase = float(pearsonr(_UpperCAmelCase, _UpperCAmelCase)[0]) UpperCamelCase = float(spearmanr(_UpperCAmelCase, _UpperCAmelCase)[0]) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self ): '''simple docstring''' if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( '''You should supply a 
configuration name selected in ''' '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", ''' '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(lowerCamelCase__ , lowerCamelCase__ )} elif self.config_name == "stsb": return pearson_and_spearman(lowerCamelCase__ , lowerCamelCase__ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(lowerCamelCase__ , lowerCamelCase__ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", ''' '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
350
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation snake_case_ : Union[str, Any] = logging.get_logger(__name__) snake_case_ : Union[str, Any] = {'vocab_file': 'spiece.model'} snake_case_ : Any = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } snake_case_ : Dict = { 'AI-Sweden/gpt-sw3-126m': 2_048, 'AI-Sweden/gpt-sw3-350m': 2_048, 'AI-Sweden/gpt-sw3-1.6b': 2_048, 'AI-Sweden/gpt-sw3-6.7b': 2_048, 'AI-Sweden/gpt-sw3-20b': 2_048, } class lowercase__ ( snake_case_ ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ): '''simple docstring''' UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs UpperCamelCase = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) UpperCamelCase = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing UpperCamelCase = '''<|endoftext|>''' if eos_token is None else eos_token UpperCamelCase = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: UpperCamelCase = unk_token if pad_token is None else pad_token UpperCamelCase = eos_token if bos_token is None else bos_token else: UpperCamelCase = '''<pad>''' if pad_token is None else pad_token UpperCamelCase = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , ) UpperCamelCase = do_lower_case UpperCamelCase = remove_space UpperCamelCase = keep_accents UpperCamelCase = vocab_file UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) # Used for whitespace normalization in input texts # fmt : off UpperCamelCase = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing UpperCamelCase = re.compile( f'[{"".join(map(lowerCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' ) def __getstate__( self ): '''simple docstring''' UpperCamelCase = self.__dict__.copy() UpperCamelCase = None return state def __setstate__( self , lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCamelCase = {} UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase ( self ): '''simple docstring''' return len(self.sp_model ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = self.non_printing_characters_re.sub('''''' , lowerCamelCase__ ) # Normalize whitespaces UpperCamelCase = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization UpperCamelCase = unicodedata.normalize('''NFC''' , lowerCamelCase__ ) return text def UpperCAmelCase ( self , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = self.preprocess_text(lowerCamelCase__ ) return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' return self.sp_model.PieceToId(lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' return self.sp_model.IdToPiece(lowerCamelCase__ ) @staticmethod def UpperCAmelCase ( lowerCamelCase__ ): '''simple docstring''' return out_string def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = [] UpperCamelCase = '''''' UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase__ ) + token UpperCamelCase = True UpperCamelCase = [] else: current_sub_tokens.append(lowerCamelCase__ ) UpperCamelCase = False out_string += self.sp_model.decode(lowerCamelCase__ ) return out_string def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCamelCase = os.path.join( lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ , '''wb''' ) as fi: UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,) def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = False ): '''simple docstring''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ): UpperCamelCase = 
self.preprocess_text(lowerCamelCase__ ) UpperCamelCase = self.sp_model.encode(lowerCamelCase__ ) else: UpperCamelCase = [self.preprocess_text(lowerCamelCase__ ) for t in text] UpperCamelCase = self.sp_model.encode(lowerCamelCase__ ) if return_tensors is True or return_tensors == "pt": UpperCamelCase = torch.tensor(lowerCamelCase__ ) return token_ids def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' return self.sp_model.decode(lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] UpperCamelCase = ( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCamelCase__ ) + f'{self.bos_token}Bot:' ) return self.encode(text=lowerCamelCase__ )
350
1
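acc_and_fa in the GLUE metric above combines a mean over exact matches with sklearn's f1_score. A toy check of both numbers, assuming scikit-learn and NumPy are installed; the prediction arrays are made up.

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0, 1])
labels = np.array([0, 1, 0, 0, 1])

acc = float((preds == labels).mean())              # 4 of 5 correct
f1 = float(f1_score(y_true=labels, y_pred=preds))  # precision 2/3, recall 1
assert acc == 0.8
assert abs(f1 - 0.8) < 1e-9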
from queue import PriorityQueue from typing import Any import numpy as np def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : str ,__UpperCamelCase : set ,__UpperCamelCase : set ,__UpperCamelCase : dict ,__UpperCamelCase : dict ,__UpperCamelCase : PriorityQueue ,__UpperCamelCase : dict ,__UpperCamelCase : float | int ,): """simple docstring""" for nxt, d in graph[v]: if nxt in visited_forward: continue A_ = cst_fwd.get(__UpperCamelCase ,np.inf ) A_ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) A_ = new_cost_f A_ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: A_ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : dict ,__UpperCamelCase : dict ): """simple docstring""" A_ = -1 A_ = set() A_ = set() A_ = {source: 0} A_ = {destination: 0} A_ = {source: None} A_ = {destination: None} A_ = PriorityQueue() A_ = PriorityQueue() A_ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): A_ , A_ = queue_forward.get() visited_forward.add(__UpperCamelCase ) A_ , A_ = queue_backward.get() visited_backward.add(__UpperCamelCase ) A_ = pass_and_relaxation( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) A_ = pass_and_relaxation( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: A_ = shortest_distance return shortest_path_distance __a :List[str] = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } __a :Union[str, Any] = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
86
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): a : Dict = StableDiffusionSAGPipeline a : Any = TEXT_TO_IMAGE_PARAMS a : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS a : Tuple = False def _snake_case ( self : List[Any] ) ->Tuple: '''simple docstring''' torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _UpperCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) _UpperCAmelCase = CLIPTextModel(__UpperCamelCase ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _UpperCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any=0 ) ->Any: '''simple docstring''' if str(__UpperCamelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCAmelCase = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def _snake_case ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): def _snake_case ( self : Union[str, Any] ) ->str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Tuple ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) _UpperCAmelCase = sag_pipe.to(__UpperCamelCase ) sag_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = """.""" _UpperCAmelCase = 
torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _UpperCAmelCase = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _snake_case ( self : Union[str, Any] ) ->Tuple: '''simple docstring''' _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) _UpperCAmelCase = sag_pipe.to(__UpperCamelCase ) sag_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = """.""" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _UpperCAmelCase = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _snake_case ( self : str ) ->str: '''simple docstring''' _UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) _UpperCAmelCase = sag_pipe.to(__UpperCamelCase ) sag_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = """.""" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) _UpperCAmelCase = output.images assert image.shape == (1, 5_12, 7_68, 3)
555
0
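As a cross-check on the bidirectional search above, a plain one-directional Dijkstra over the same forward graph the snippet declares: the E -> F distance should be 3 (E -> G -> F), beating the length-4 path through B, C, D. Edge lists use tuples here purely for illustration.

import heapq

graph_fwd = {
    "B": [("C", 1)], "C": [("D", 1)], "D": [("F", 1)],
    "E": [("B", 1), ("G", 2)], "F": [], "G": [("F", 1)],
}

def dijkstra(source, target):
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == target:
            return d
        if d > dist.get(v, float("inf")):
            continue  # stale heap entry
        for nxt, w in graph_fwd[v]:
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heapq.heappush(heap, (d + w, nxt))
    return -1

assert dijkstra("E", "F") == 3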
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowerCAmelCase_ : Optional[int] = '''src/diffusers''' # Matches is_xxx_available() lowerCAmelCase_ : Any = re.compile(r'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla lowerCAmelCase_ : List[str] = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') lowerCAmelCase_ : Optional[Any] = ''' {0} = None ''' lowerCAmelCase_ : Optional[Any] = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' lowerCAmelCase_ : List[Any] = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def __A ( lowerCAmelCase_ ): _UpperCAmelCase : Optional[Any] = _re_backend.findall(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) == 0: return None return "_and_".join(lowerCAmelCase_ ) def __A ( ): with open(os.path.join(lowerCAmelCase_ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _UpperCAmelCase : Optional[Any] = f.readlines() # Get to the point we do the actual imports for type checking _UpperCAmelCase : int = 0 _UpperCAmelCase : Optional[Any] = {} # Go through the end of the file while line_index < len(lowerCAmelCase_ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _UpperCAmelCase : List[Any] = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 _UpperCAmelCase : List[Any] = [] # Until we unindent, add backend objects to the list while line_index < len(lowerCAmelCase_ ) and len(lines[line_index] ) > 1: _UpperCAmelCase : List[Any] = lines[line_index] _UpperCAmelCase : Optional[int] = _re_single_line_import.search(lowerCAmelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowerCAmelCase_ ) > 0: _UpperCAmelCase : str = objects else: line_index += 1 return backend_specific_objects def __A ( lowerCAmelCase_ , lowerCAmelCase_ ): if name.isupper(): return DUMMY_CONSTANT.format(lowerCAmelCase_ ) elif name.islower(): return DUMMY_FUNCTION.format(lowerCAmelCase_ , lowerCAmelCase_ ) else: return DUMMY_CLASS.format(lowerCAmelCase_ , lowerCAmelCase_ ) def __A ( lowerCAmelCase_=None ): if backend_specific_objects is None: _UpperCAmelCase : Union[str, Any] = read_init() # For special correspondence backend to module name as used in the function requires_modulename _UpperCAmelCase : Optional[Any] = {} for backend, objects in backend_specific_objects.items(): _UpperCAmelCase : List[Any] = """[""" + """, """.join(f"\"{b}\"" for b in backend.split("""_and_""" ) ) + """]""" _UpperCAmelCase : Tuple = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowerCAmelCase_ , lowerCAmelCase_ ) for o in objects] ) _UpperCAmelCase : Tuple = dummy_file return dummy_files def __A ( lowerCAmelCase_=False ): _UpperCAmelCase : List[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in 
utils/dummy_xxx_objects.py _UpperCAmelCase : List[str] = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. _UpperCAmelCase : Optional[Any] = os.path.join(lowerCAmelCase_ , """utils""" ) _UpperCAmelCase : int = { backend: os.path.join(lowerCAmelCase_ , f"dummy_{short_names.get(lowerCAmelCase_ , lowerCAmelCase_ )}_objects.py" ) for backend in dummy_files.keys() } _UpperCAmelCase : Optional[Any] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowerCAmelCase_ ): with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _UpperCAmelCase : Dict = f.read() else: _UpperCAmelCase : Dict = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating diffusers.utils.dummy_{short_names.get(lowerCAmelCase_ , lowerCAmelCase_ )}_objects.py as the main " """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f"diffusers.utils.dummy_{short_names.get(lowerCAmelCase_ , lowerCAmelCase_ )}_objects.py. Run `make fix-copies` " """to fix this.""" ) if __name__ == "__main__": lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCAmelCase_ : Union[str, Any] = parser.parse_args() check_dummies(args.fix_and_overwrite)
156
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ : List[str] = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Optional[Any] = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Dict = ['''LayoutLMv2FeatureExtractor'''] lowerCAmelCase_ : str = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowerCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
156
1
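The DUMMY_CLASS template in the check_dummies script above is an ordinary format string in which {0} is the object name and {1} the backend list. An illustrative expansion (the class and backend names below are made up, and the template is abbreviated):

template = (
    "class {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)

rendered = template.format("UNet2DModel", '["torch"]')
assert "class UNet2DModel(metaclass=DummyObject):" in rendered
assert rendered.count('["torch"]') == 2  # {1} is substituted in both places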
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Union[List[PIL.Image.Image], np.ndarray] snake_case__ : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('''>=''', '''0.0.12''') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import 
StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : np.ndarray snake_case__ : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
682
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
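For context, a minimal smoke test of a converted checkpoint could look like the sketch below; the dump folder and image path are placeholders, not part of the original script.

from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

# Hypothetical paths: point these at the folder produced by the conversion above.
model = Pix2StructForConditionalGeneration.from_pretrained("./pix2struct-dump")
processor = Pix2StructProcessor.from_pretrained("./pix2struct-dump")

image = Image.open("screenshot.png")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")
generated_ids = model.generate(**inputs, max_new_tokens=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])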
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class __a : UpperCamelCase_ : str UpperCamelCase_ : str = None @staticmethod def _SCREAMING_SNAKE_CASE ( )-> Union[str, Any]: """simple docstring""" raise NotImplementedError def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict )-> Union[str, Any]: """simple docstring""" raise NotImplementedError def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : str )-> Any: """simple docstring""" raise NotImplementedError def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> List[str]: """simple docstring""" if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] )-> int: """simple docstring""" return f"`pip install {cls.pip_package or cls.name}`" class __a ( _lowerCAmelCase ): UpperCamelCase_ : List[str] = '''optuna''' @staticmethod def _SCREAMING_SNAKE_CASE ( )-> Union[str, Any]: """simple docstring""" return is_optuna_available() def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] )-> Union[str, Any]: """simple docstring""" return run_hp_search_optuna(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : Union[str, Any] )-> Union[str, Any]: """simple docstring""" return default_hp_space_optuna(UpperCAmelCase_ ) class __a ( _lowerCAmelCase ): UpperCamelCase_ : Optional[int] = '''ray''' UpperCamelCase_ : Tuple = '''\'ray[tune]\'''' @staticmethod def _SCREAMING_SNAKE_CASE ( )-> Any: """simple docstring""" return is_ray_available() def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : int )-> int: """simple docstring""" return run_hp_search_ray(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase_ : str )-> Union[str, Any]: """simple docstring""" return default_hp_space_ray(UpperCAmelCase_ ) class __a ( _lowerCAmelCase ): UpperCamelCase_ : Union[str, Any] = '''sigopt''' @staticmethod def _SCREAMING_SNAKE_CASE ( )-> List[str]: """simple docstring""" return is_sigopt_available() def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple )-> Tuple: """simple docstring""" return run_hp_search_sigopt(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Tuple )-> Optional[Any]: """simple docstring""" return default_hp_space_sigopt(UpperCAmelCase_ ) class __a ( _lowerCAmelCase ): UpperCamelCase_ : Any = '''wandb''' @staticmethod def _SCREAMING_SNAKE_CASE ( )-> List[Any]: """simple docstring""" return is_wandb_available() def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ 
: int , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] )-> str: """simple docstring""" return run_hp_search_wandb(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : List[Any] )-> List[Any]: """simple docstring""" return default_hp_space_wandb(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCamelCase__ ( )-> str: """simple docstring""" UpperCamelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(UpperCAmelCase_ ) > 0: UpperCamelCase = available_backends[0].name if len(UpperCAmelCase_ ) > 1: logger.info( F"{len(UpperCAmelCase_ )} hyperparameter search backends available. Using {name} as the default." ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
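These backends are normally reached through `Trainer.hyperparameter_search`. A hedged sketch, assuming a configured `Trainer` instance named `trainer` built with a `model_init` callback; the search space below is illustrative:

def optuna_hp_space(trial):
    # Illustrative search space; default_hp_space_optuna is used when hp_space is omitted.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32]),
    }

best_run = trainer.hyperparameter_search(
    direction="minimize",
    backend="optuna",  # resolved via the backend registry above
    hp_space=optuna_hp_space,
    n_trials=20,
)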
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""Dataset input stream backed by a Python generator."""

from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
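The public entry point that wraps this reader is `datasets.Dataset.from_generator`; a quick sketch:

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)  # builds and caches a map-style dataset via the reader above
print(ds[0])  # {'id': 0, 'text': 'example 0'}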
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase_ ( _lowercase ) -> str: '''simple docstring''' if ( (cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F) or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) # or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) # or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) # or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) # or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F) or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) # ): # return True return False def lowercase_ ( _lowercase ) -> Optional[int]: '''simple docstring''' for char in word: lowerCamelCase_ : List[str] = ord(_lowercase ) if not _is_chinese_char(_lowercase ): return 0 return 1 def lowercase_ ( _lowercase ) -> Dict: '''simple docstring''' lowerCamelCase_ : Union[str, Any] = set() for token in tokens: lowerCamelCase_ : Optional[int] = len(_lowercase ) > 1 and is_chinese(_lowercase ) if chinese_word: word_set.add(_lowercase ) lowerCamelCase_ : List[str] = list(_lowercase ) return word_list def lowercase_ ( _lowercase , _lowercase ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens lowerCamelCase_ : List[Any] = max([len(_lowercase ) for w in chinese_word_set] ) lowerCamelCase_ : int = bert_tokens lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = 0, len(_lowercase ) while start < end: lowerCamelCase_ : int = True if is_chinese(bert_word[start] ): lowerCamelCase_ : Dict = min(end - start , _lowercase ) for i in range(_lowercase , 1 , -1 ): lowerCamelCase_ : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase_ : Union[str, Any] = '''##''' + bert_word[j] lowerCamelCase_ : Union[str, Any] = start + i lowerCamelCase_ : Optional[Any] = False break if single_word: start += 1 return bert_word def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> List[Any]: '''simple docstring''' lowerCamelCase_ : Optional[Any] = [] for i in range(0 , len(_lowercase ) , 100 ): lowerCamelCase_ : Any = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCamelCase_ : Union[str, Any] = [get_chinese_word(_lowercase ) for r in res] ltp_res.extend(_lowercase ) assert len(_lowercase ) == len(_lowercase ) lowerCamelCase_ : Optional[Any] = [] for i in range(0 , len(_lowercase ) , 100 ): lowerCamelCase_ : Tuple = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowercase , truncation=_lowercase , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowercase ) == len(_lowercase ) lowerCamelCase_ : str = [] for input_ids, chinese_word in zip(_lowercase , _lowercase ): lowerCamelCase_ : Tuple = [] for id in input_ids: lowerCamelCase_ : Tuple = bert_tokenizer._convert_id_to_token(_lowercase ) input_tokens.append(_lowercase ) lowerCamelCase_ : List[Any] = add_sub_symbol(_lowercase , _lowercase ) lowerCamelCase_ : Optional[Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowercase ): if token[:2] == "##": lowerCamelCase_ : Optional[int] = token[2:] # save chinese tokens' pos if len(_lowercase ) == 1 and _is_chinese_char(ord(_lowercase ) ): ref_id.append(_lowercase ) ref_ids.append(_lowercase ) assert len(_lowercase ) == len(_lowercase ) return ref_ids def lowercase_ ( _lowercase ) -> List[Any]: '''simple docstring''' with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase_ : List[Any] = f.readlines() lowerCamelCase_ : List[Any] = [line.strip() for line in data if len(_lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase_ : Optional[int] = LTP(args.ltp ) # faster in GPU device lowerCamelCase_ : Tuple = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase_ : int = prepare_ref(_lowercase , _lowercase , _lowercase ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ : Optional[Any] = [json.dumps(_lowercase ) + '''\n''' for ref in ref_ids] f.writelines(_lowercase ) if __name__ == "__main__": __lowercase : int = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') __lowercase : Union[str, Any] = parser.parse_args() main(args)
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _lowercase : """simple docstring""" def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : int=13 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=32 , __lowerCamelCase : Dict=2 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Union[str, Any]=10 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : Any=3 , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=2 , ): '''simple docstring''' lowerCamelCase__ : List[Any] = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : Any = image_size lowerCamelCase__ : List[str] = patch_size lowerCamelCase__ : Tuple = num_channels lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : List[Any] = use_labels lowerCamelCase__ : Optional[int] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Any = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : int = scope lowerCamelCase__ : List[str] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase__ : List[Any] = (image_size // patch_size) ** 2 lowerCamelCase__ : Tuple = num_patches + 2 def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Any = None if self.use_labels: lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : Any ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTModel(config=__lowerCamelCase ) lowerCamelCase__ : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTForMaskedImageModeling(config=__lowerCamelCase ) lowerCamelCase__ : List[str] = model(__lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Union[str, Any] = TFDeiTForMaskedImageModeling(__lowerCamelCase ) lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = self.type_sequence_label_size lowerCamelCase__ : int = TFDeiTForImageClassification(__lowerCamelCase ) lowerCamelCase__ : int = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : Tuple = TFDeiTForImageClassification(__lowerCamelCase ) lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs lowerCamelCase__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) A__ = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) A__ = False A__ = False A__ = False A__ = False def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = TFDeiTModelTester(self ) lowerCamelCase__ : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] = model_class(__lowerCamelCase ) lowerCamelCase__ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : int = [*signature.parameters.keys()] lowerCamelCase__ : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=False ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : int = TFDeiTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase): """simple docstring""" @cached_property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) lowerCamelCase__ : Optional[int] = self.default_image_processor lowerCamelCase__ : int = prepare_img() lowerCamelCase__ : str = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase__ : List[str] = model(**__lowerCamelCase ) # verify the logits lowerCamelCase__ : List[str] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , 
direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
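A hedged sketch of how an OnnxConfig like the one above is consumed by the (legacy) `transformers.onnx.export` API; the opset and output path are assumptions:

from pathlib import Path

from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
from transformers.models.blenderbot_small.configuration_blenderbot_small import BlenderbotSmallOnnxConfig
from transformers.onnx import export

model_id = "facebook/blenderbot_small-90M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id)

# The config's generate_dummy_inputs / inputs / outputs drive tracing and axis naming.
onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
onnx_inputs, onnx_outputs = export(
    tokenizer, model, onnx_config, opset=13, output=Path("blenderbot_small.onnx")
)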
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
import numpy as np


class Cell:
    """A grid cell with A* bookkeeping: g, h, f scores and a parent link."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbours of `cell`."""
        neughbour_cord = [
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1), (0, 1),
            (1, -1), (1, 0), (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search over the grid, using squared Euclidean distance to the goal as the heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __magic_name__ ( __lowerCAmelCase): A: Optional[int] = (EulerDiscreteScheduler,) A: List[Any] = 1_0 def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Any ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowerCamelCase__ ) return config def UpperCAmelCase__ ( self : List[Any] ) -> Any: '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> Tuple: '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def UpperCAmelCase__ ( self : int ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : List[Any] = self.scheduler_classes[0] UpperCamelCase__ : List[Any] = self.get_scheduler_config() UpperCamelCase__ : Tuple = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ : str = torch.manual_seed(0 ) UpperCamelCase__ : Optional[int] = self.dummy_model() UpperCamelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ : Optional[int] = sample.to(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ : Optional[int] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : int = model(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : Dict = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ) UpperCamelCase__ : Optional[Any] = output.prev_sample UpperCamelCase__ : int = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCamelCase__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCAmelCase__ ( self : str ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Any = self.scheduler_classes[0] UpperCamelCase__ : List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCamelCase__ : Optional[int] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) UpperCamelCase__ : Dict = torch.manual_seed(0 ) UpperCamelCase__ : List[Any] = self.dummy_model() UpperCamelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma UpperCamelCase__ : Optional[int] = sample.to(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): UpperCamelCase__ : List[Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : List[str] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ) UpperCamelCase__ : int = output.prev_sample UpperCamelCase__ : Any = 
torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCamelCase__ : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def UpperCAmelCase__ ( self : Dict ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : Optional[Any] = self.scheduler_classes[0] UpperCamelCase__ : Tuple = self.get_scheduler_config() UpperCamelCase__ : List[Any] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ ) UpperCamelCase__ : int = torch.manual_seed(0 ) UpperCamelCase__ : List[str] = self.dummy_model() UpperCamelCase__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase__ : str = sample.to(lowerCamelCase__ ) for t in scheduler.timesteps: UpperCamelCase__ : int = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : Tuple = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ) UpperCamelCase__ : List[Any] = output.prev_sample UpperCamelCase__ : Any = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCamelCase__ : str = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: '''simple docstring''' UpperCamelCase__ : Dict = self.scheduler_classes[0] UpperCamelCase__ : Dict = self.get_scheduler_config() UpperCamelCase__ : List[str] = scheduler_class(**lowerCamelCase__ , use_karras_sigmas=lowerCamelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ ) UpperCamelCase__ : Optional[int] = torch.manual_seed(0 ) UpperCamelCase__ : List[Any] = self.dummy_model() UpperCamelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() UpperCamelCase__ : Optional[Any] = sample.to(lowerCamelCase__ ) for t in scheduler.timesteps: UpperCamelCase__ : str = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : Any = model(lowerCamelCase__ , lowerCamelCase__ ) UpperCamelCase__ : int = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ) UpperCamelCase__ : str = output.prev_sample UpperCamelCase__ : Optional[int] = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCamelCase__ : Any = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
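The end-user pattern these tests exercise is swapping a pipeline's scheduler; a sketch (the model id is an assumption):

from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild the scheduler from the pipeline's existing config, mirroring get_scheduler_config() above.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]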
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import math def __magic_name__ ( __snake_case : int ) -> Any: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( __snake_case : float = 0.1 ) -> Optional[Any]: lowercase : List[str] = 3 lowercase : str = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(snake_case_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given any two of voltage, current and resistance
    (pass the unknown one as 0), return the missing quantity."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
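A quick worked check of all three branches (pass the unknown quantity as 0):

print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}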
"""simple docstring""" import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _UpperCamelCase : Optional[int] = logging.get_logger('transformers.models.speecht5') def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Any , __snake_case : Tuple ): '''simple docstring''' hf_model.apply_weight_norm() lowercase = checkpoint['input_conv.weight_g'] lowercase = checkpoint['input_conv.weight_v'] lowercase = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): lowercase = checkpoint[f'upsamples.{i}.1.weight_g'] lowercase = checkpoint[f'upsamples.{i}.1.weight_v'] lowercase = checkpoint[f'upsamples.{i}.1.bias'] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g'] lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v'] lowercase = checkpoint[f'blocks.{i}.convs1.{j}.1.bias'] lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g'] lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v'] lowercase = checkpoint[f'blocks.{i}.convs2.{j}.1.bias'] lowercase = checkpoint['output_conv.1.weight_g'] lowercase = checkpoint['output_conv.1.weight_v'] lowercase = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any]=None , __snake_case : int=None , ): '''simple docstring''' if config_path is not None: lowercase = SpeechTaHifiGanConfig.from_pretrained(__snake_case ) else: lowercase = SpeechTaHifiGanConfig() lowercase = SpeechTaHifiGan(__snake_case ) lowercase = torch.load(__snake_case ) load_weights(orig_checkpoint['model']['generator'] , __snake_case , __snake_case ) lowercase = np.load(__snake_case ) lowercase = stats[0].reshape(-1 ) lowercase = stats[1].reshape(-1 ) lowercase = torch.from_numpy(__snake_case ).float() lowercase = torch.from_numpy(__snake_case ).float() model.save_pretrained(__snake_case ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(__snake_case ) if __name__ == "__main__": _UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) _UpperCamelCase : Union[str, Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCamelCase : Dict = None _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} _UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } _UpperCamelCase : Tuple = { 'camembert-base': 5_1_2, } _UpperCamelCase : Union[str, Any] = '▁' class a ( a_ ): UpperCAmelCase_ : Dict =VOCAB_FILES_NAMES UpperCAmelCase_ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str =["input_ids", "attention_mask"] UpperCAmelCase_ : List[Any] =CamembertTokenizer def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , **_lowerCamelCase , ) lowercase = vocab_file lowercase = False if not self.vocab_file else True def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ): lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowercase = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
"""simple docstring""" import random class snake_case_ : """simple docstring""" @staticmethod def UpperCAmelCase__ ( lowerCamelCase_) -> tuple[list[int], list[int]]: UpperCamelCase = [ord(lowerCamelCase_) for i in text] UpperCamelCase = [] UpperCamelCase = [] for i in plain: UpperCamelCase = random.randint(1 , 3_0_0) UpperCamelCase = (i + k) * k cipher.append(lowerCamelCase_) key.append(lowerCamelCase_) return cipher, key @staticmethod def UpperCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_) -> str: UpperCamelCase = [] for i in range(len(lowerCamelCase_)): UpperCamelCase = int((cipher[i] - (key[i]) ** 2) / key[i]) plain.append(chr(lowerCamelCase_)) return "".join(lowerCamelCase_) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = Onepad().encrypt('Hello') print(c, k) print(Onepad().decrypt(c, k))
"""simple docstring""" # Imports import numpy as np class __a : def __init__( self , a__=None , a__=None , a__=None , a__=None , a__=None ): self.set_matricies(red=a__ , green=a__ , blue=a__ , red_edge=a__ , nir=a__ ) def snake_case_ ( self , a__=None , a__=None , a__=None , a__=None , a__=None ): if red is not None: _lowerCamelCase = red if green is not None: _lowerCamelCase = green if blue is not None: _lowerCamelCase = blue if red_edge is not None: _lowerCamelCase = red_edge if nir is not None: _lowerCamelCase = nir return True def snake_case_ ( self , a__="" , a__=None , a__=None , a__=None , a__=None , a__=None ): self.set_matricies(red=a__ , green=a__ , blue=a__ , red_edge=a__ , nir=a__ ) _lowerCamelCase = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!' ) return False def snake_case_ ( self ): return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def snake_case_ ( self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def snake_case_ ( self ): return self.nir * (self.red / (self.green**2)) def snake_case_ ( self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def snake_case_ ( self ): return (self.nir - self.red) / (self.nir + self.red) def snake_case_ ( self ): return (self.nir - self.blue) / (self.nir + self.blue) def snake_case_ ( self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def snake_case_ ( self ): return (self.nir - self.green) / (self.nir + self.green) def snake_case_ ( self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def snake_case_ ( self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def snake_case_ ( self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def snake_case_ ( self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def snake_case_ ( self , a__=0.08 , a__=1.22 , a__=0.03 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def snake_case_ ( self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def snake_case_ ( self ): return (self.nir / self.green) - 1 def snake_case_ ( self ): return (self.nir / self.redEdge) - 1 def snake_case_ ( self ): return (self.red - self.blue) / self.red def snake_case_ ( self ): _lowerCamelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def snake_case_ ( self ): return self.nir - self.green def snake_case_ ( self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 
* self.red - 7.5 * self.blue + 1) ) def snake_case_ ( self ): _lowerCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def snake_case_ ( self , a__=0.16 ): return (self.nir - self.green) / (self.nir + self.green + y) def snake_case_ ( self , a__=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def snake_case_ ( self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def snake_case_ ( self , a__=None , a__=None ): return (self.nir - b) / (a * self.red) def snake_case_ ( self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def snake_case_ ( self ): return (self.red + self.green + self.blue) / 30.5 def snake_case_ ( self ): return self.nir / self.red def snake_case_ ( self ): return (self.rvi() - 1) / (self.rvi() + 1) def snake_case_ ( self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def snake_case_ ( self ): return self.green / (self.nir + self.red + self.green) def snake_case_ ( self ): return self.nir / (self.nir + self.red + self.green) def snake_case_ ( self ): return self.red / (self.nir + self.red + self.green) def snake_case_ ( self ): return (self.green - self.red) / (self.green + self.red) def snake_case_ ( self ): return (self.red - self.green) / (self.red + self.green) def snake_case_ ( self ): _lowerCamelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) _lowerCamelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def snake_case_ ( self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def snake_case_ ( self ): return self.nir / self.red def snake_case_ ( self ): return (self.ndvi() + 0.5) ** (1 / 2) def snake_case_ ( self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
650
0
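A minimal usage sketch for the index class repaired above (the 2x2 reflectance arrays are made-up sample data; `IndexCalculation` and `calculation` are the names restored in that row):

import numpy as np

red = np.array([[0.1, 0.2], [0.2, 0.3]])   # hypothetical red band
nir = np.array([[0.6, 0.7], [0.8, 0.9]])   # hypothetical near-infrared band

calc = IndexCalculation(red=red, nir=nir)
print(calc.ndvi())                                  # (nir - red) / (nir + red), elementwise
print(calc.calculation("NDVI", red=red, nir=nir))   # same values via the dispatch table
print(calc.calculation("no-such-index"))            # prints 'Index not in the list!' and returns False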
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def _lowerCAmelCase ( ) -> Tuple: _SCREAMING_SNAKE_CASE : str = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 2_0, "a " * 3_0, "b " * 7], } _SCREAMING_SNAKE_CASE : str = Dataset.from_dict(lowerCamelCase__ ) return dataset class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = get_dataset() _SCREAMING_SNAKE_CASE : Optional[Any] = make_duplicate_clusters(snake_case__ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = get_dataset() _SCREAMING_SNAKE_CASE : Optional[Any] = deduplicate_dataset(snake_case__ ) self.assertEqual(len(snake_case__ ) , 2 ) print(snake_case__ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , snake_case__ )
720
"""simple docstring""" from __future__ import annotations lowercase_ : List[str] = '''#''' class UpperCamelCase : def __init__( self ): """simple docstring""" _SCREAMING_SNAKE_CASE : dict = {} def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self._trie for char in text: if char not in trie: _SCREAMING_SNAKE_CASE : List[str] = {} _SCREAMING_SNAKE_CASE : List[str] = trie[char] _SCREAMING_SNAKE_CASE : Optional[Any] = True def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self._trie for char in prefix: if char in trie: _SCREAMING_SNAKE_CASE : str = trie[char] else: return [] return self._elements(snake_case__ ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = [] for c, v in d.items(): _SCREAMING_SNAKE_CASE : int = [" "] if c == END else [(c + s) for s in self._elements(snake_case__ )] result.extend(snake_case__ ) return tuple(snake_case__ ) lowercase_ : Union[str, Any] = Trie() lowercase_ : Optional[Any] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def _lowerCAmelCase ( lowerCamelCase__ : str ) -> tuple: _SCREAMING_SNAKE_CASE : Dict = trie.find_word(lowerCamelCase__ ) return tuple(string + word for word in suffixes ) def _lowerCAmelCase ( ) -> None: print(autocomplete_using_trie("de" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
295
0
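The test row above exercises MinHash near-duplicate clustering with a 0.85 threshold; a dependency-free sketch of the idea it approximates, using exact Jaccard similarity over whitespace tokens instead of MinHash (the fixture contents are copied from the test):

def jaccard(a: str, b: str) -> float:
    # token-shingle sets; MinHash estimates this ratio without building the full sets
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0

docs = ["a " * 20, "a " * 30, "b " * 7]
pairs = [(i, j) for i in range(3) for j in range(i + 1, 3) if jaccard(docs[i], docs[j]) >= 0.85]
print(pairs)  # [(0, 1)]: the two 'a'-repeats cluster together, matching the test's expectation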
'''simple docstring'''
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix and keep the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix and keep the truncated average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image ('path_to_image' is a placeholder)
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
78
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 
1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing 
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
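A worked example for the pooling functions repaired above (a hand-picked 4x4 input with size=2 and stride=2; expected outputs shown as comments):

import numpy as np

arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
print(maxpooling(arr, size=2, stride=2))   # [[ 6.  8.] [14. 16.]]
print(avgpooling(arr, size=2, stride=2))   # [[ 3.  5.] [11. 13.]] (averages truncate via int())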
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self : List[Any] ) -> int: '''simple docstring''' super().tearDown() gc.collect() def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' __UpperCamelCase =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) __UpperCamelCase =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) __UpperCamelCase ='''xvjiarui/stable-diffusion-2-inpainting''' __UpperCamelCase , __UpperCamelCase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__ , safety_checker=UpperCamelCase__ ) __UpperCamelCase ='''Face of a yellow cat, high resolution, sitting on a park bench''' __UpperCamelCase =jax.random.PRNGKey(0 ) __UpperCamelCase =50 __UpperCamelCase =jax.device_count() __UpperCamelCase =num_samples * [prompt] __UpperCamelCase =num_samples * [init_image] __UpperCamelCase =num_samples * [mask_image] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =pipeline.prepare_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # shard inputs and rng __UpperCamelCase =replicate(UpperCamelCase__ ) __UpperCamelCase =jax.random.split(UpperCamelCase__ , jax.device_count() ) __UpperCamelCase =shard(UpperCamelCase__ ) __UpperCamelCase =shard(UpperCamelCase__ ) __UpperCamelCase =shard(UpperCamelCase__ ) __UpperCamelCase =pipeline( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ) __UpperCamelCase =output.images.reshape(UpperCamelCase__ , 512 , 512 , 3 ) __UpperCamelCase =images[0, 253:256, 253:256, -1] __UpperCamelCase =jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCamelCase =jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
296
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging __lowercase = logging.get_logger(__name__) def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ): """simple docstring""" try: with open(__UpperCamelCase , '''rb''' ) as flax_state_f: __UpperCamelCase =from_bytes(__UpperCamelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(__UpperCamelCase ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : List[str] ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights __UpperCamelCase =flatten_dict(jax.tree_util.tree_map(lambda __UpperCamelCase : x.dtype == jnp.bfloataa , __UpperCamelCase ) ).values() if any(__UpperCamelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) __UpperCamelCase =jax.tree_util.tree_map( lambda __UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCamelCase ) __UpperCamelCase ='''''' __UpperCamelCase =flatten_dict(__UpperCamelCase , sep='''.''' ) __UpperCamelCase =pt_model.state_dict() # keep track of unexpected & missing keys __UpperCamelCase =[] __UpperCamelCase =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): __UpperCamelCase =flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: __UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight'''] __UpperCamelCase =jnp.transpose(__UpperCamelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": __UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight'''] __UpperCamelCase =flax_tensor.T elif flax_key_tuple_array[-1] == "scale": __UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__UpperCamelCase ): __UpperCamelCase =( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) __UpperCamelCase ='''.'''.join(__UpperCamelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """ F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict __UpperCamelCase =np.asarray(__UpperCamelCase ) if not isinstance(__UpperCamelCase , np.ndarray ) else flax_tensor __UpperCamelCase =torch.from_numpy(__UpperCamelCase ) # remove from missing keys missing_keys.remove(__UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(__UpperCamelCase ) pt_model.load_state_dict(__UpperCamelCase ) # re-transform missing_keys to list __UpperCamelCase =list(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__UpperCamelCase ) > 0: logger.warning( F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" ''' use it for predictions and inference.''' ) return pt_model
296
1
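The Flax-to-PyTorch converter above rests on one axis permutation: Flax stores 4-D conv kernels as (height, width, in_channels, out_channels) while PyTorch expects (out, in, height, width). A shape-only sketch of that step, with made-up channel counts:

import numpy as np

flax_kernel = np.zeros((3, 3, 16, 32))               # (H, W, in, out), as flattened from the Flax state
pt_kernel = np.transpose(flax_kernel, (3, 2, 0, 1))  # the converter's (3, 2, 0, 1) permutation
print(pt_kernel.shape)                               # (32, 16, 3, 3)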
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
513
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert each pixel of a BGR image to its negative, in place."""
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
513
1
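The `__init__` above registers submodules with `_LazyModule` so heavy imports run only on first attribute access; a simplified stand-alone sketch of the same mechanism (not the transformers implementation, just the pattern it follows):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # import the owning submodule lazily, then pull the attribute off it
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)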
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _a: str = logging.get_logger(__name__) class __UpperCamelCase ( lowercase ): SCREAMING_SNAKE_CASE__ = ['pixel_values'] def __init__( self : Dict , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : Optional[Any] , ): '''simple docstring''' super().__init__(**lowerCAmelCase ) UpperCAmelCase_ = size if size is not None else {"shortest_edge": 256} UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = crop_size UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Any , ): '''simple docstring''' UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}" ) UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size=size["shortest_edge"] , default_to_square=lowerCAmelCase ) return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ): '''simple docstring''' UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : float , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Any ): '''simple docstring''' return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ): '''simple docstring''' return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def __A ( self : Tuple , lowerCAmelCase : ImageInput , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[float] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : List[Any] , ): '''simple docstring''' UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ = get_size_dict(lowerCAmelCase ) UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(lowerCAmelCase ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images] if do_center_crop: UpperCAmelCase_ = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
268
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _a: Optional[int] = """CompVis/stable-diffusion-v1-1""" _a: Dict = """CompVis/stable-diffusion-v1-2""" _a: Optional[Any] = """CompVis/stable-diffusion-v1-3""" _a: str = """CompVis/stable-diffusion-v1-4""" class __UpperCamelCase ( lowercase ): def __init__( self : Union[str, Any] , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , lowerCAmelCase : bool = True , ): '''simple docstring''' super()._init_() UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) UpperCAmelCase_ = StableDiffusionPipeline( vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , requires_safety_checker=lowerCAmelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __A ( self : Tuple ): '''simple docstring''' return {k: getattr(self , lowerCAmelCase ) for k in self.config.keys() if not k.startswith("_" )} def __A ( self : Dict , lowerCAmelCase : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase ) def __A ( self : int ): '''simple docstring''' self.enable_attention_slicing(lowerCAmelCase ) @torch.no_grad() def __A ( self : Union[str, Any] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Tuple , ): '''simple docstring''' return self.pipea( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) @torch.no_grad() def __A ( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , 
lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : List[Any] , ): '''simple docstring''' return self.pipea( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) @torch.no_grad() def __A ( self : List[str] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Union[str, Any] , ): '''simple docstring''' return self.pipea( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) @torch.no_grad() def __A ( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Optional[Any] , ): '''simple docstring''' return self.pipea( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) @torch.no_grad() def __A ( self : Union[str, Any] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase 
: float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Union[str, Any] , ): '''simple docstring''' UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" self.to(lowerCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ = self.textaimg_sda_a( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ = self.textaimg_sda_a( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ = self.textaimg_sda_a( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ = self.textaimg_sda_a( prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
268
1
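The image processor above chains resize, center-crop, rescale, and normalize; a compact numpy sketch of the last two steps, using the 0.5 mean/std that transformers' IMAGENET_STANDARD constants hold (the sample image is random):

import numpy as np

mean = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5])   # IMAGENET_STANDARD_STD

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)  # hypothetical HWC image
rescaled = image * (1 / 255)          # do_rescale with the default rescale_factor
normalized = (rescaled - mean) / std  # do_normalize
print(normalized.min() >= -1.0, normalized.max() <= 1.0)  # True True: pixels land in [-1, 1]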
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients ordered from x**0 upward) at x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x with Horner's rule: one multiply and add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
108
'''simple docstring'''


def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible by divisor, or 0 if none exists."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least n for which the repunit divisibility function A(n) first exceeds the limit (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
207
0
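A quick cross-check of the two evaluators repaired above (coefficients run from x**0 upward, so reversed() hands Horner the leading term first):

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
# by hand: 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800
print(evaluate_poly(poly, x))  # 79800.0, up to float rounding
print(horner(poly, x))         # same value with n multiplies instead of n exponentiations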
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase : List[str] = 1_9_2 lowerCAmelCase : Union[str, Any] = 7_6_8 lowerCAmelCase : Optional[Any] = 1_2 lowerCAmelCase : Tuple = 3 lowerCAmelCase : Tuple = [8_0_0, 1_3_3_3] lowerCAmelCase : Tuple = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase : Dict = 3_3_0 lowerCAmelCase : List[Any] = 1_4 lowerCAmelCase : Tuple = 6 lowerCAmelCase : Optional[Any] = 1_3_2_0 elif "yolos_s" in yolos_name: lowerCAmelCase : Optional[int] = 3_8_4 lowerCAmelCase : str = 1_5_3_6 lowerCAmelCase : Optional[int] = 1_2 lowerCAmelCase : Union[str, Any] = 6 elif "yolos_b" in yolos_name: lowerCAmelCase : Dict = [8_0_0, 1_3_4_4] lowerCAmelCase : int = 9_1 lowerCAmelCase : Optional[int] = "huggingface/label-files" lowerCAmelCase : List[str] = "coco-detection-id2label.json" lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) lowerCAmelCase : Tuple = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCAmelCase : Optional[Any] = idalabel lowerCAmelCase : int = {v: k for k, v in idalabel.items()} return config def a__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosConfig , SCREAMING_SNAKE_CASE : bool = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase : Any = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase : Union[str, Any] = in_proj_weight[: config.hidden_size, :] lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size] lowerCAmelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase : List[Any] = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if "backbone" in name: lowerCAmelCase : str = name.replace("backbone" , "vit" ) if "cls_token" in name: lowerCAmelCase : Any = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowerCAmelCase : Any = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowerCAmelCase : Dict = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowerCAmelCase : List[str] = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowerCAmelCase : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowerCAmelCase : List[Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowerCAmelCase : List[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowerCAmelCase : int = 
name.replace("attn" , "attention.self" ) if "norm1" in name: lowerCAmelCase : Tuple = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCAmelCase : Dict = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowerCAmelCase : str = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCAmelCase : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowerCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowerCAmelCase : Any = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowerCAmelCase : List[Any] = name.replace("vit.norm" , "vit.layernorm" ) return name def a__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosForObjectDetection ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE ) if "qkv" in key: lowerCAmelCase : Dict = key.split("." ) lowerCAmelCase : Dict = int(key_split[2] ) lowerCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase : Tuple = val[:dim, :] lowerCAmelCase : List[str] = val[ dim : dim * 2, : ] lowerCAmelCase : Optional[Any] = val[-dim:, :] else: lowerCAmelCase : Dict = val[:dim] lowerCAmelCase : Union[str, Any] = val[dim : dim * 2] lowerCAmelCase : Tuple = val[-dim:] else: lowerCAmelCase : Any = val return orig_state_dict def a__ ( ): '''simple docstring''' lowerCAmelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = False ): '''simple docstring''' lowerCAmelCase : Tuple = get_yolos_config(SCREAMING_SNAKE_CASE ) # load original state_dict lowerCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"] # load 🤗 model lowerCAmelCase : Tuple = YolosForObjectDetection(SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase : List[Any] = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase : Optional[int] = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2 lowerCAmelCase : Dict = YolosImageProcessor(format="coco_detection" , size=SCREAMING_SNAKE_CASE ) lowerCAmelCase : Any = image_processor(images=prepare_img() , return_tensors="pt" ) lowerCAmelCase : Any = model(**SCREAMING_SNAKE_CASE ) lowerCAmelCase : Union[str, Any] = outputs.logits, outputs.pred_boxes lowerCAmelCase : List[str] = None, None if yolos_name == "yolos_ti": lowerCAmelCase : Optional[int] = torch.tensor( [[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase : List[str] = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] ) lowerCAmelCase : Tuple = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase : List[Any] = torch.tensor( [[-36.2_220, 
-14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase : List[Any] = torch.tensor( [[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] ) elif yolos_name == "yolos_base": lowerCAmelCase : Tuple = torch.tensor( [[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] ) else: raise ValueError(f"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: lowerCAmelCase : Tuple = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowerCAmelCase : Union[str, Any] = model_mapping[yolos_name] image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization="hustvl" ) model.push_to_hub(SCREAMING_SNAKE_CASE , organization="hustvl" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
706
"""simple docstring""" from math import factorial def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ): '''simple docstring''' return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
681
0
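The YOLOS converter above slices each fused timm qkv projection into separate query/key/value blocks by rows; a shape-level sketch of that slicing with a toy hidden size:

import numpy as np

hidden_size = 8
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                 # first hidden_size rows
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle block
value = in_proj_weight[-hidden_size:, :]                # last block
print(query.shape, key.shape, value.shape)              # (8, 8) (8, 8) (8, 8)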
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = """RegNetConfig""" # Base docstring _lowerCAmelCase = """facebook/regnet-y-040""" _lowerCAmelCase = [1, 1088, 7, 7] # Image classification docstring _lowerCAmelCase = """facebook/regnet-y-040""" _lowerCAmelCase = """tabby, tabby cat""" _lowerCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ = 3 , A__ = 1 , A__ = 1 , A__ = "relu" , **A__ , ): """simple docstring""" super().__init__(**A__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb UpperCAmelCase_: Union[str, Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) UpperCAmelCase_: Dict = tf.keras.layers.ConvaD( filters=A__ , kernel_size=A__ , strides=A__ , padding="VALID" , groups=A__ , use_bias=A__ , name="convolution" , ) UpperCAmelCase_: List[str] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) UpperCAmelCase_: Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def snake_case_ ( self , A__ ): """simple docstring""" UpperCAmelCase_: List[Any] = self.convolution(self.padding(A__ ) ) UpperCAmelCase_: str = self.normalization(A__ ) UpperCAmelCase_: Any = self.activation(A__ ) return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: int = config.num_channels UpperCAmelCase_: Tuple = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def snake_case_ ( self , A__ ): """simple docstring""" UpperCAmelCase_: List[str] = shape_list(A__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) UpperCAmelCase_: Tuple = tf.transpose(A__ , perm=(0, 2, 3, 1) ) UpperCAmelCase_: Optional[Any] = self.embedder(A__ ) return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ = 2 , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: List[str] = tf.keras.layers.ConvaD( filters=A__ , kernel_size=1 , strides=A__ , use_bias=A__ , name="convolution" ) UpperCAmelCase_: Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def snake_case_ ( self , A__ , A__ = False ): """simple docstring""" return self.normalization(self.convolution(A__ ) , training=A__ ) class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A__ , name="pooler" ) UpperCAmelCase_: Dict = [ tf.keras.layers.ConvaD(filters=A__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=A__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def snake_case_ ( self , A__ ): """simple docstring""" UpperCAmelCase_: List[str] = self.pooler(A__ ) for layer_module in self.attention: UpperCAmelCase_: Optional[Any] = layer_module(A__ ) UpperCAmelCase_: Union[str, Any] = hidden_state * pooled return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ , A__ , A__ = 1 , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: Dict = in_channels != out_channels or stride != 1 UpperCAmelCase_: Optional[Any] = max(1 , out_channels // config.groups_width ) UpperCAmelCase_: Dict = ( TFRegNetShortCut(A__ , stride=A__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
UpperCAmelCase_: Tuple = [ TFRegNetConvLayer(A__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( A__ , stride=A__ , groups=A__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(A__ , kernel_size=1 , activation=A__ , name="layer.2" ), ] UpperCAmelCase_: List[Any] = ACTaFN[config.hidden_act] def snake_case_ ( self , A__ ): """simple docstring""" UpperCAmelCase_: Optional[int] = hidden_state for layer_module in self.layers: UpperCAmelCase_: Any = layer_module(A__ ) UpperCAmelCase_: Dict = self.shortcut(A__ ) hidden_state += residual UpperCAmelCase_: Optional[int] = self.activation(A__ ) return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ , A__ , A__ = 1 , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: Tuple = in_channels != out_channels or stride != 1 UpperCAmelCase_: int = max(1 , out_channels // config.groups_width ) UpperCAmelCase_: List[Any] = ( TFRegNetShortCut(A__ , stride=A__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) UpperCAmelCase_: List[str] = [ TFRegNetConvLayer(A__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( A__ , stride=A__ , groups=A__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(A__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(A__ , kernel_size=1 , activation=A__ , name="layer.3" ), ] UpperCAmelCase_: int = ACTaFN[config.hidden_act] def snake_case_ ( self , A__ ): """simple docstring""" UpperCAmelCase_: Tuple = hidden_state for layer_module in self.layers: UpperCAmelCase_: List[Any] = layer_module(A__ ) UpperCAmelCase_: List[Any] = self.shortcut(A__ ) hidden_state += residual UpperCAmelCase_: Optional[int] = self.activation(A__ ) return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , A__ , A__ , A__ = 2 , A__ = 2 , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: Optional[Any] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer UpperCAmelCase_: Optional[Any] = [ # downsampling is done in the first layer with stride of 2 layer(A__ , A__ , A__ , stride=A__ , name="layers.0" ), *[layer(A__ , A__ , A__ , name=F"layers.{i+1}" ) for i in range(depth - 1 )], ] def snake_case_ ( self , A__ ): """simple docstring""" for layer_module in self.layers: UpperCAmelCase_: List[Any] = layer_module(A__ ) return hidden_state class UpperCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self , A__ , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) UpperCAmelCase_: int = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(A__ , A__ , A__ , depth=A__ , name=F"stages.{i+1}" ) ) def snake_case_ ( self , A__ , A__ = False , A__ = True ): """simple docstring""" UpperCAmelCase_: Any = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase_: Dict = hidden_states + (hidden_state,) UpperCAmelCase_: Optional[Any] = 
stage_module(A__ ) if output_hidden_states: UpperCAmelCase_: Tuple = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A__ , hidden_states=A__ ) @keras_serializable class UpperCAmelCase__ ( tf.keras.layers.Layer ): snake_case_ = RegNetConfig def __init__( self , A__ , **A__ ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: Optional[int] = config UpperCAmelCase_: List[str] = TFRegNetEmbeddings(A__ , name="embedder" ) UpperCAmelCase_: Union[str, Any] = TFRegNetEncoder(A__ , name="encoder" ) UpperCAmelCase_: str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A__ , name="pooler" ) @unpack_inputs def snake_case_ ( self , A__ , A__ = None , A__ = None , A__ = False , ): """simple docstring""" UpperCAmelCase_: Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_: str = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_: Tuple = self.embedder(A__ , training=A__ ) UpperCAmelCase_: List[str] = self.encoder( A__ , output_hidden_states=A__ , return_dict=A__ , training=A__ ) UpperCAmelCase_: List[str] = encoder_outputs[0] UpperCAmelCase_: Optional[Any] = self.pooler(A__ ) # Change to NCHW output format to have uniformity across the modules UpperCAmelCase_: str = tf.transpose(A__ , perm=(0, 3, 1, 2) ) UpperCAmelCase_: Optional[int] = tf.transpose(A__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: UpperCAmelCase_: Tuple = tuple([tf.transpose(A__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A__ , pooler_output=A__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase__ ( _lowercase ): snake_case_ = RegNetConfig snake_case_ = """regnet""" snake_case_ = """pixel_values""" @property def snake_case_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _lowerCAmelCase = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , _lowercase , ) class UpperCAmelCase__ ( _lowercase ): def __init__( self , A__ , *A__ , **A__ ): """simple docstring""" super().__init__(A__ , *A__ , **A__ ) UpperCAmelCase_: List[Any] = TFRegNetMainLayer(A__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(A__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case_ ( self , A__ , A__ = None , A__ = None , A__=False , ): """simple docstring""" UpperCAmelCase_: int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_: Tuple = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_: Optional[Any] = self.regnet( pixel_values=A__ , output_hidden_states=A__ , return_dict=A__ , training=A__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowercase , ) class UpperCAmelCase__ ( _lowercase , _lowercase ): def __init__( self , A__ , *A__ , **A__ ): """simple docstring""" super().__init__(A__ , *A__ , **A__ ) UpperCAmelCase_: Optional[Any] = config.num_labels UpperCAmelCase_: Tuple = TFRegNetMainLayer(A__ , name="regnet" ) # classification head UpperCAmelCase_: int = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case_ ( self , A__ = None , A__ = None , A__ = None , A__ = None , A__=False , ): """simple docstring""" UpperCAmelCase_: int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_: Any = self.regnet( A__ , output_hidden_states=A__ , return_dict=A__ , training=A__ ) UpperCAmelCase_: List[str] = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase_: Dict = self.classifier[0](A__ ) UpperCAmelCase_: List[Any] = self.classifier[1](A__ ) UpperCAmelCase_: Any = None if labels is None else self.hf_compute_loss(labels=A__ , logits=A__ ) if not return_dict: UpperCAmelCase_: Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A__ , logits=A__ , hidden_states=outputs.hidden_states )
137
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ : Dict = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : str = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[int] = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Any = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys snake_case__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
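The `_LazyModule` indirection above defers heavy imports until a symbol is first touched. A rough stand-in using only the standard library (PEP 562 module-level `__getattr__`, placed in a package's `__init__.py`; `_LazyModule` itself is a transformers internal, so this sketches the idea rather than its actual implementation):

import importlib

_import_structure = {"tokenization_whisper": ["WhisperTokenizer"]}

def __getattr__(name):
    # resolve the symbol to its submodule and import that module on first access
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")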
402
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
703
import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class A__ ( A ): """simple docstring""" def __init__( self : Tuple , *A_ : Optional[int] , **A_ : int ): '''simple docstring''' warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , A_ , ) super().__init__(*A_ , **A_ )
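This file shows the standard deprecation-alias pattern: keep the old class importable, warn on construction, and inherit everything else from the replacement. A generic sketch of the same pattern with made-up class names:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        # warn once at construction time, then behave exactly like the new class
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)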
503
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Dict = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """git_vision_model""" def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any]=768 , UpperCAmelCase : int=3072 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : int=3 , UpperCAmelCase : str=224 , UpperCAmelCase : Any=16 , UpperCAmelCase : Dict="quick_gelu" , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Optional[Any]=0.0_2 , **UpperCAmelCase : str , ) -> str: super().__init__(**UpperCAmelCase ) lowerCamelCase__ : Tuple = hidden_size lowerCamelCase__ : Union[str, Any] = intermediate_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : Any = num_attention_heads lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[int] = patch_size lowerCamelCase__ : Dict = image_size lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Optional[Any] = layer_norm_eps lowerCamelCase__ : Optional[Any] = hidden_act @classmethod def A_ ( cls : Optional[int] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : str ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCAmelCase ) lowerCamelCase__ , lowerCamelCase__ : Tuple = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": lowerCamelCase__ : str = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase , **UpperCAmelCase ) class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """git""" def __init__( self : List[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]=30522 , UpperCAmelCase : Tuple=768 , UpperCAmelCase : int=6 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Dict=3072 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Tuple=1024 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : Tuple=1e-12 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any="absolute" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=False , UpperCAmelCase : Optional[int]=101 , UpperCAmelCase : Optional[int]=102 , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[int] , ) -> List[str]: super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , pad_token_id=UpperCAmelCase , **UpperCAmelCase ) if vision_config is None: lowerCamelCase__ : Optional[Any] = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' 
) lowerCamelCase__ : Optional[Any] = GitVisionConfig(**UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : List[str] = num_hidden_layers lowerCamelCase__ : Dict = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Dict = initializer_range lowerCamelCase__ : str = layer_norm_eps lowerCamelCase__ : Optional[Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : List[str] = tie_word_embeddings lowerCamelCase__ : List[Any] = num_image_with_embedding lowerCamelCase__ : Any = bos_token_id lowerCamelCase__ : List[Any] = eos_token_id def A_ ( self : Optional[int] ) -> Optional[Any]: lowerCamelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) lowerCamelCase__ : Dict = self.vision_config.to_dict() lowerCamelCase__ : Any = self.__class__.model_type return output
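The GIT config above nests a vision sub-config and falls back to defaults when none is supplied. A stripped-down sketch of that composite-config pattern (class and field names here are hypothetical, not the PretrainedConfig machinery):

class VisionConfig:
    def __init__(self, hidden_size=768, num_hidden_layers=12):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers

class ParentConfig:
    def __init__(self, vision_config=None, vocab_size=30522):
        # fall back to an all-defaults sub-config, mirroring the None check above
        self.vision_config = VisionConfig(**(vision_config or {}))
        self.vocab_size = vocab_size

config = ParentConfig(vision_config={"hidden_size": 512})
assert config.vision_config.hidden_size == 512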
295
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 2_00 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (_UpperCAmelCase + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # start at `coin`, not at the target amount, so every reachable total is updated
        for i in range(coin, _UpperCAmelCase + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[_UpperCAmelCase]


if __name__ == "__main__":
    assert SCREAMING_SNAKE_CASE(2_00) == 7_36_82
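A hand-checkable case for the coin-counting DP above: with UK coins there are exactly four ways to make 5p (5; 2+2+1; 2+1+1+1; 1+1+1+1+1), so the function should return 4 for a 5p target:

assert SCREAMING_SNAKE_CASE(5) == 4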
295
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class _lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" lowerCAmelCase__ ='''decision_transformer''' lowerCAmelCase__ =['''past_key_values'''] lowerCAmelCase__ ={ '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , __SCREAMING_SNAKE_CASE=17 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=4096 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=5_0256 , __SCREAMING_SNAKE_CASE=5_0256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" snake_case__ : Dict =state_dim snake_case__ : Optional[int] =act_dim snake_case__ : int =hidden_size snake_case__ : int =max_ep_len snake_case__ : Any =action_tanh snake_case__ : str =vocab_size snake_case__ : Dict =n_positions snake_case__ : Union[str, Any] =n_layer snake_case__ : Optional[int] =n_head snake_case__ : Any =n_inner snake_case__ : List[Any] =activation_function snake_case__ : Optional[Any] =resid_pdrop snake_case__ : str =embd_pdrop snake_case__ : Union[str, Any] =attn_pdrop snake_case__ : List[str] =layer_norm_epsilon snake_case__ : int =initializer_range snake_case__ : List[str] =scale_attn_weights snake_case__ : str =use_cache snake_case__ : Optional[Any] =scale_attn_by_inverse_layer_idx snake_case__ : Optional[int] =reorder_and_upcast_attn snake_case__ : Dict =bos_token_id snake_case__ : int =eos_token_id super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
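The `attribute_map` in the config above lets callers read canonical names (`num_hidden_layers`) while the config stores GPT-2-style ones (`n_layer`). A minimal sketch of how such aliasing can work (an illustration of the idea, not the actual PretrainedConfig machinery):

class AliasedConfig:
    attribute_map = {"num_hidden_layers": "n_layer", "num_attention_heads": "n_head"}

    def __init__(self, n_layer=3, n_head=1):
        self.n_layer = n_layer
        self.n_head = n_head

    def __getattr__(self, name):
        # only called when normal lookup fails, so the stored names resolve directly
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig(n_layer=6).num_hidden_layers == 6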
707
def lowercase_ ( SCREAMING_SNAKE_CASE : list ):
    """Return the smallest possible difference between the sums of two subsets of the list."""
    arr = SCREAMING_SNAKE_CASE
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(n + 1 ):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]  # skip element i (not dp[i][j - 1])
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or include it
    # scan down from s // 2: the closer one side gets to half the total,
    # the smaller the difference s - 2 * j
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
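A quick check of the partition function above against the classic example [1, 6, 11, 5]: the best split is {1, 5, 6} versus {11}, giving sums 12 and 11, so the minimum difference is 1:

assert lowercase_([1, 6, 11, 5]) == 1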
408
0