Dataset columns and per-column statistics:

- code: string, lengths 82 to 54.1k
- code_codestyle: int64, values 0 to 699
- style_context: string, lengths 111 to 35.6k
- style_context_codestyle: int64, values 0 to 699
- label: int64, values 0 to 1
def max_digit_removal(number):
    """Return the largest integer obtainable by removing exactly one digit."""
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(number))
    # One working copy of the digit list per digit position.
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)  # drop a different digit in each copy
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
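A quick sanity check of the repaired helper (the name max_digit_removal is my label for the deobfuscated routine above; the obfuscation had collapsed the parameter and the local string into one name, which is what broke it — the expected values below follow directly from the algorithm):

# Each candidate drops one digit; the maximum survivor is returned.
assert max_digit_removal(1234) == 234   # candidates: 234, 134, 124, 123
assert max_digit_removal(2736) == 736   # candidates: 736, 236, 276, 273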
0
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
2
0
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class __lowerCamelCase (_a ): _lowercase = ["""image_processor"""] _lowercase = """SamImageProcessor""" def __init__( self: Dict,A_: Union[str, Any] ): '''simple docstring''' super().__init__(A_ ) __UpperCamelCase = self.image_processor __UpperCamelCase = -10 __UpperCamelCase = self.image_processor.size['longest_edge'] def __call__( self: Optional[Any],A_: Optional[int]=None,A_: int=None,A_: str=None,A_: str=None,A_: Optional[Union[str, TensorType]] = None,**A_: Optional[int],): '''simple docstring''' __UpperCamelCase = self.image_processor( A_,return_tensors=A_,**A_,) # pop arguments that are not used in the foward but used nevertheless __UpperCamelCase = encoding_image_processor['original_sizes'] if hasattr(A_,'numpy' ): # Checks if Torch or TF tensor __UpperCamelCase = original_sizes.numpy() __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._check_and_preprocess_points( input_points=A_,input_labels=A_,input_boxes=A_,) __UpperCamelCase = self._normalize_and_convert( A_,A_,input_points=A_,input_labels=A_,input_boxes=A_,return_tensors=A_,) return encoding_image_processor def snake_case_ ( self: Tuple,A_: Any,A_: str,A_: Dict=None,A_: Dict=None,A_: int=None,A_: List[Any]="pt",): '''simple docstring''' if input_points is not None: if len(A_ ) != len(A_ ): __UpperCamelCase = [ self._normalize_coordinates(self.target_size,A_,original_sizes[0] ) for point in input_points ] else: __UpperCamelCase = [ self._normalize_coordinates(self.target_size,A_,A_ ) for point, original_size in zip(A_,A_ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __UpperCamelCase, __UpperCamelCase = self._pad_points_and_labels(A_,A_ ) __UpperCamelCase = np.array(A_ ) if input_labels is not None: __UpperCamelCase = np.array(A_ ) if input_boxes is not None: if len(A_ ) != len(A_ ): __UpperCamelCase = [ self._normalize_coordinates(self.target_size,A_,original_sizes[0],is_bounding_box=A_ ) for box in input_boxes ] else: __UpperCamelCase = [ self._normalize_coordinates(self.target_size,A_,A_,is_bounding_box=A_ ) for box, original_size in zip(A_,A_ ) ] __UpperCamelCase = np.array(A_ ) if input_boxes is not None: if return_tensors == "pt": __UpperCamelCase = torch.from_numpy(A_ ) # boxes batch size of 1 by default __UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": __UpperCamelCase = tf.convert_to_tensor(A_ ) # boxes batch size of 1 by default __UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'input_boxes': input_boxes} ) if input_points is not None: if return_tensors == "pt": __UpperCamelCase = torch.from_numpy(A_ ) # point batch size of 1 by default __UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": __UpperCamelCase = tf.convert_to_tensor(A_ ) # point batch size of 1 by default __UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'input_points': input_points} ) if input_labels is not None: if 
return_tensors == "pt": __UpperCamelCase = torch.from_numpy(A_ ) # point batch size of 1 by default __UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": __UpperCamelCase = tf.convert_to_tensor(A_ ) # point batch size of 1 by default __UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'input_labels': input_labels} ) return encoding_image_processor def snake_case_ ( self: List[str],A_: int,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = max([point.shape[0] for point in input_points] ) __UpperCamelCase = [] for i, point in enumerate(A_ ): if point.shape[0] != expected_nb_points: __UpperCamelCase = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 ) __UpperCamelCase = np.append(input_labels[i],[self.point_pad_value] ) processed_input_points.append(A_ ) __UpperCamelCase = processed_input_points return input_points, input_labels def snake_case_ ( self: Optional[int],A_: int,A_: np.ndarray,A_: Union[str, Any],A_: int=False ): '''simple docstring''' __UpperCamelCase, __UpperCamelCase = original_size __UpperCamelCase, __UpperCamelCase = self.image_processor._get_preprocess_shape(A_,longest_edge=A_ ) __UpperCamelCase = deepcopy(A_ ).astype(A_ ) if is_bounding_box: __UpperCamelCase = coords.reshape(-1,2,2 ) __UpperCamelCase = coords[..., 0] * (new_w / old_w) __UpperCamelCase = coords[..., 1] * (new_h / old_h) if is_bounding_box: __UpperCamelCase = coords.reshape(-1,4 ) return coords def snake_case_ ( self: Dict,A_: Optional[Any]=None,A_: List[str]=None,A_: Tuple=None,): '''simple docstring''' if input_points is not None: if hasattr(A_,'numpy' ): # Checks for TF or Torch tensor __UpperCamelCase = input_points.numpy().tolist() if not isinstance(A_,A_ ) or not isinstance(input_points[0],A_ ): raise ValueError('Input points must be a list of list of floating points.' ) __UpperCamelCase = [np.array(A_ ) for input_point in input_points] else: __UpperCamelCase = None if input_labels is not None: if hasattr(A_,'numpy' ): __UpperCamelCase = input_labels.numpy().tolist() if not isinstance(A_,A_ ) or not isinstance(input_labels[0],A_ ): raise ValueError('Input labels must be a list of list integers.' ) __UpperCamelCase = [np.array(A_ ) for label in input_labels] else: __UpperCamelCase = None if input_boxes is not None: if hasattr(A_,'numpy' ): __UpperCamelCase = input_boxes.numpy().tolist() if ( not isinstance(A_,A_ ) or not isinstance(input_boxes[0],A_ ) or not isinstance(input_boxes[0][0],A_ ) ): raise ValueError('Input boxes must be a list of list of list of floating points.' ) __UpperCamelCase = [np.array(A_ ).astype(np.floataa ) for box in input_boxes] else: __UpperCamelCase = None return input_points, input_labels, input_boxes @property def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(A_ ) ) def snake_case_ ( self: int,*A_: int,**A_: Tuple ): '''simple docstring''' return self.image_processor.post_process_masks(*A_,**A_ )
1
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 255 , A_=True , )-> int: '''simple docstring''' UpperCamelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std UpperCamelCase = do_rescale UpperCamelCase = rescale_factor UpperCamelCase = do_pad def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase_ ( self , A_ , A_=False )-> Union[str, Any]: '''simple docstring''' if not batched: UpperCamelCase = image_inputs[0] if isinstance(A_ , Image.Image ): UpperCamelCase , UpperCamelCase = image.size else: UpperCamelCase , UpperCamelCase = image.shape[1], image.shape[2] if w < h: UpperCamelCase = int(self.size['shortest_edge'] * h / w ) UpperCamelCase = self.size['shortest_edge'] elif w > h: UpperCamelCase = self.size['shortest_edge'] UpperCamelCase = int(self.size['shortest_edge'] * w / h ) else: UpperCamelCase = self.size['shortest_edge'] UpperCamelCase = self.size['shortest_edge'] else: UpperCamelCase = [] for image in image_inputs: UpperCamelCase , UpperCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase = max(A_ , key=lambda A_ : item[0] )[0] UpperCamelCase = max(A_ , key=lambda A_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase): lowerCAmelCase_ = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self )-> Optional[Any]: '''simple docstring''' UpperCamelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'do_rescale' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) def UpperCAmelCase_ ( self )-> Optional[Any]: '''simple docstring''' UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 
1333} ) self.assertEqual(image_processor.do_pad , A_ ) UpperCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , A_ ) def UpperCAmelCase_ ( self )-> int: '''simple docstring''' pass def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase_ ( self )-> str: '''simple docstring''' UpperCamelCase = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: UpperCamelCase = json.loads(f.read() ) UpperCamelCase = {'image_id': 39769, 'annotations': target} # encode them UpperCamelCase = DeformableDetrImageProcessor() UpperCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='pt' ) # verify pixel values UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , A_ ) UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) ) # verify boxes UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ ) UpperCamelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) ) # verify is_crowd UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) ) # verify class_labels UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) ) # verify orig_size UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) ) # verify size UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) ) @slow def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: UpperCamelCase = json.loads(f.read() ) UpperCamelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} UpperCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them UpperCamelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) UpperCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' ) # verify pixel values UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , A_ ) UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) ) # verify boxes UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ ) UpperCamelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) ) # verify is_crowd UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) ) # verify class_labels UpperCamelCase = 
torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) ) # verify masks UpperCamelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ ) # verify orig_size UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) ) # verify size UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
3
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
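A round-trip check for the pair above (base16_encode / base16_decode are my restorations of the two identically obfuscated definitions, which would otherwise have shadowed each other):

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"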
2
0
"""simple docstring""" import os from collections.abc import Iterator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ): lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): return F'{i * " "}*' if i else "\n##" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): lowerCAmelCase = '' for filepath in sorted(good_file_paths(_UpperCAmelCase ) ): lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) if filepath != old_path: lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0 lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' ) lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
4
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
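A short usage sketch (function names as restored above; the obfuscated isinstance check compared the argument against itself, which is what the repair fixes):

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0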
2
0
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum over nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
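For example, with the restored signature (the obfuscation had given all three parameters the same name, a syntax error):

assert find_max([1, 9, 3, 7], 0, 3) == 9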
5
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
2
0
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
0
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys a = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') a = ( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split() ) a = '''|'''.join(sys.argv[1:]) a = re.compile(rF'''^({joined_dirs}).*?\.py$''') a = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
7
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
0
# Longest distance in a directed acyclic graph, computed with Kahn's
# topological-sort algorithm; distances are counted in vertices.
def longest_distance(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # long_dist[v] = longest path (in vertices) ending at v

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))  # prints 5 for the graph below: the chain 0 -> 2 -> 5 -> 6 -> 7


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
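# Illustrative cross-check (hypothetical helper, not from the source): a
# brute-force memoised DFS over the same adjacency list should agree with the
# Kahn's-algorithm result above.
def dfs_longest(dag: dict, vertex: int, memo: dict) -> int:
    # Longest path length (counted in vertices) starting at `vertex`.
    if vertex not in memo:
        memo[vertex] = 1 + max(
            (dfs_longest(dag, nxt, memo) for nxt in dag[vertex]), default=0
        )
    return memo[vertex]


assert max(dfs_longest(graph, v, {}) for v in graph) == 5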
8
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
0
import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_snake_case , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_snake_case , 'num_attention_heads' ) ) self.parent.assertTrue(hasattr(_snake_case , 'num_encoder_blocks' ) ) class __lowerCAmelCase : """simple docstring""" def __init__( self : Any , _snake_case : str , _snake_case : Union[str, Any]=13 , _snake_case : Any=64 , _snake_case : Optional[Any]=3 , _snake_case : Dict=4 , _snake_case : Tuple=[2, 2, 2, 2] , _snake_case : str=[8, 4, 2, 1] , _snake_case : Union[str, Any]=[16, 32, 64, 1_28] , _snake_case : int=[1, 4, 8, 16] , _snake_case : List[str]=[1, 2, 4, 8] , _snake_case : int=True , _snake_case : int=True , _snake_case : Union[str, Any]="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=0.02 , _snake_case : Tuple=3 , _snake_case : int=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_encoder_blocks A__ = sr_ratios A__ = depths A__ = hidden_sizes A__ = downsampling_rates A__ = num_attention_heads A__ = is_training A__ = use_labels A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = num_labels A__ = scope def _a ( self : int ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : int ): """simple docstring""" return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _a ( self : int , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Any ): """simple docstring""" A__ = SegformerModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _a ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Dict ): """simple docstring""" A__ = self.num_labels A__ = 
SegformerForSemanticSegmentation(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ): """simple docstring""" A__ = 1 A__ = SegformerForSemanticSegmentation(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case ) A__ = model(_snake_case , labels=_snake_case ) self.parent.assertGreater(result.loss , 0.0 ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Optional[int] = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) A__ : Optional[Any] = True A__ : str = False A__ : Tuple = False A__ : Dict = False def _a ( self : Union[str, Any] ): """simple docstring""" A__ = SegformerModelTester(self ) A__ = SegformerConfigTester(self , config_class=_snake_case ) def _a ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : List[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case ) def _a ( self : Tuple ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*_snake_case ) @unittest.skip('SegFormer does not use inputs_embeds' ) def _a ( self : List[Any] ): """simple docstring""" pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Dict ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Dict ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.attentions A__ = sum(self.model_tester.depths ) 
self.assertEqual(len(_snake_case ) , _snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ = (self.model_tester.image_size // 32) ** 2 A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ = len(_snake_case ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) self.assertEqual(out_len + 1 , len(_snake_case ) ) A__ = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _a ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(_snake_case : Dict , _snake_case : int , _snake_case : List[Any] ): A__ = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_encoder_blocks self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def _a ( self : Tuple ): """simple docstring""" if not self.model_tester.is_training: return A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: if model_class in get_values(_snake_case ): continue A__ = model_class(_snake_case ) model.to(_snake_case ) model.train() A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) A__ = model(**_snake_case ).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _a ( self : Optional[Any] ): """simple docstring""" pass @slow def _a ( self : Tuple ): """simple docstring""" for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = SegformerModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) -> str: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : Dict ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( _snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) ) @slow def _a ( self : Any ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case ) A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( _snake_case ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = encoded_inputs.pixel_values.to(_snake_case ) with torch.no_grad(): A__ = model(_snake_case ) A__ = outputs.logits.detach().cpu() A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] ) A__ = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , _snake_case ) A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case ) A__ = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , _snake_case )
9
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
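# Illustrative usage (a sketch, not from this file; assumes the standard public
# transformers API, which exposes this config as OpenAIGPTConfig):
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=6)  # any keyword from __init__ may be overridden
model = OpenAIGPTModel(config)       # randomly initialised 6-layer GPT-1
assert model.config.n_embd == 768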
2
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _lowerCAmelCase = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] _lowerCAmelCase = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] _lowerCAmelCase = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) _lowerCAmelCase = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) _lowerCAmelCase = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def _snake_case ( __snake_case , __snake_case ): for tf_name, hf_name in patterns: _UpperCamelCase = k.replace(__snake_case , __snake_case ) return k def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = BigBirdPegasusConfig(**__snake_case ) _UpperCamelCase = BigBirdPegasusForConditionalGeneration(__snake_case ) _UpperCamelCase = torch_model.state_dict() _UpperCamelCase = {} # separating decoder weights _UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} _UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ): _UpperCamelCase = [k.endswith(__snake_case ) for ending in KEYS_TO_IGNORE] if any(__snake_case ): continue _UpperCamelCase = DECODER_PATTERNS _UpperCamelCase = rename_state_dict_key(__snake_case , __snake_case ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): _UpperCamelCase = v.T _UpperCamelCase = torch.from_numpy(__snake_case ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ): _UpperCamelCase = [k.endswith(__snake_case ) for ending in KEYS_TO_IGNORE] if any(__snake_case ): continue _UpperCamelCase = REMAINING_PATTERNS _UpperCamelCase = rename_state_dict_key(__snake_case , __snake_case ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): _UpperCamelCase = v.T _UpperCamelCase = torch.from_numpy(__snake_case ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" _UpperCamelCase = mapping['''model.embed_positions.weight'''] _UpperCamelCase = mapping.pop('''model.embed_positions.weight''' ) _UpperCamelCase , _UpperCamelCase = torch_model.load_state_dict(__snake_case , strict=__snake_case ) _UpperCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _snake_case ( __snake_case ): _UpperCamelCase = tf.train.list_variables(__snake_case ) _UpperCamelCase = {} _UpperCamelCase = ['''global_step'''] for name, shape in tqdm(__snake_case , desc='''converting tf checkpoint to dict''' ): _UpperCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue _UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case ) _UpperCamelCase = array return tf_weights def _snake_case ( __snake_case , __snake_case , __snake_case ): _UpperCamelCase = get_tf_weights_as_numpy(__snake_case ) _UpperCamelCase = convert_bigbird_pegasus(__snake_case , __snake_case ) torch_model.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.") _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
10
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
2
0
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
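# Equivalent bitwise formulation (illustrative only; `xnor_gate_bitwise` is a
# name introduced here, not from the source): XNOR is the negation of XOR.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 ^ (input_1 ^ input_2)  # flip the one-bit XOR result


assert all(
    xnor_gate(a, b) == xnor_gate_bitwise(a, b) for a in (0, 1) for b in (0, 1)
)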
11
red = 0    # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2   # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place with Dijkstra's three-way partition."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
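# Worked example (illustrative, not from the source): a single linear pass with
# O(1) extra space, since every element is examined at most once.
example = [2, 0, 1, 2, 0, 1, 1, 0]
assert dutch_national_flag_sort(example) == [0, 0, 0, 1, 1, 1, 2, 2]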
2
0
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCamelCase__ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
12
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
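# Optional cross-check (illustrative; sympy is an extra dependency not used by
# the original file). The 10 001st prime is the well-known value 104 743.
from sympy import prime

assert solution() == prime(10_001) == 104_743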
2
0
"""Haversine distance between two points on the WGS84 ellipsoid, in metres."""
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Reduce the geodetic latitudes to account for the Earth's flattening.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
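# Cheap sanity checks (illustrative; no exact metre values for real city pairs
# are asserted, only properties the formula must satisfy):
assert haversine_distance(37.774856, -122.424227, 37.774856, -122.424227) == 0.0
d_ab = haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
d_ba = haversine_distance(40.713019, -74.012647, 37.774856, -122.424227)
assert abs(d_ab - d_ba) < 1e-6  # the metric is symmetric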
13
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
2
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available

_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
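# Illustrative effect of the lazy structure above (a sketch; requires
# transformers with torch installed): nothing heavy is imported until a symbol
# is actually resolved through the module.
from transformers import ASTConfig, ASTModel  # resolved lazily via _LazyModule

config = ASTConfig()      # default audio-spectrogram-transformer hyperparameters
model = ASTModel(config)  # only now does the torch-backed module get imported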
14
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
2
0
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build a GHZ-style entangled state on ``qubits`` qubits and return the counts."""
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate between consecutive qubits
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
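# A quick sanity-check sketch for the function above: entanglement means only
# the all-zeros and all-ones bitstrings can be measured, each taking roughly
# half of the shots (the exact split varies run to run).
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}  # no mixed bitstrings appear
assert sum(counts.values()) == 1000   # one count per shot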
15
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
2
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MgpstrTokenizer lowerCamelCase__ = False lowerCamelCase__ = {} lowerCamelCase__ = False def _snake_case ( self : Optional[Any] ): super().setUp() # fmt: off SCREAMING_SNAKE_CASE = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) def _snake_case ( self : Dict , **__lowerCamelCase : str ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = "tester" SCREAMING_SNAKE_CASE = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _snake_case ( self : Union[str, Any] ): pass def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) SCREAMING_SNAKE_CASE = tokenizer.encode([special_token] , add_special_tokens=__lowerCamelCase ) self.assertEqual(len(__lowerCamelCase ) , 1 ) SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) self.assertTrue(special_token not in decoded ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertNotEqual(len(__lowerCamelCase ) , 0 ) SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(text_a.replace(" " , "" ) , __lowerCamelCase ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _snake_case ( self : Optional[int] ): pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _snake_case ( self : Tuple ): pass
16
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
2
0
import numpy as np


class Cell:
    """A cell in the grid world, holding its position, parent and A* costs."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of ``cell`` (8-connected)."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
17
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
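# A number is "perfect" when it equals the sum of its proper divisors, so the
# helper above gives a one-line perfect-number test (6 = 1 + 2 + 3):
def is_perfect(number: int) -> bool:
    return sum_of_divisors(number) == number


assert is_perfect(6) and is_perfect(28) and not is_perfect(12)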
2
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"], "tokenization_biogpt": ["BioGptTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "BioGptForCausalLM", "BioGptForTokenClassification", "BioGptForSequenceClassification", "BioGptModel", "BioGptPreTrainedModel", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
18
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Rabin-Karp search)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
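# The rolling-hash update above hinges on the identity
#   hash(s[i+1 : i+1+p]) == ((hash(s[i : i+p]) - ord(s[i]) * A**(p-1)) * A
#                            + ord(s[i+p])) mod m
# where A is the alphabet size and m the modulus. A small check of that
# identity against plain polynomial hashing:
def poly_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (h * alphabet_size + ord(ch)) % modulus
    return h


text, p = "rolling", 3
for i in range(len(text) - p):
    rolled = (
        (poly_hash(text[i : i + p]) - ord(text[i]) * pow(alphabet_size, p - 1, modulus))
        * alphabet_size
        + ord(text[i + p])
    ) % modulus
    assert rolled == poly_hash(text[i + 1 : i + 1 + p])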
2
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[Any]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = 13 _UpperCamelCase = 7 _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = 99 _UpperCamelCase = 32 _UpperCamelCase = 2 _UpperCamelCase = 4 _UpperCamelCase = 37 _UpperCamelCase = '''gelu''' _UpperCamelCase = 0.1 _UpperCamelCase = 0.1 _UpperCamelCase = 5_12 _UpperCamelCase = 16 _UpperCamelCase = 2 _UpperCamelCase = 0.02 _UpperCamelCase = 3 _UpperCamelCase = 4 _UpperCamelCase = None def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = TFRoFormerModel(config=__a) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(__a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = True _UpperCamelCase = 
TFRoFormerForCausalLM(config=__a) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(__a)['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size]) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> int: '''simple docstring''' _UpperCamelCase = TFRoFormerForMaskedLM(config=__a) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Tuple: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = TFRoFormerForSequenceClassification(config=__a) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.num_choices _UpperCamelCase = TFRoFormerForMultipleChoice(config=__a) _UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1)) _UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1)) _UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1)) _UpperCamelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCamelCase = model(__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = TFRoFormerForTokenClassification(config=__a) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> str: '''simple docstring''' _UpperCamelCase = TFRoFormerForQuestionAnswering(config=__a) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(__a) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, 
) if is_tf_available() else () ) lowercase__ = ( { 'feature-extraction': TFRoFormerModel, 'fill-mask': TFRoFormerForMaskedLM, 'question-answering': TFRoFormerForQuestionAnswering, 'text-classification': TFRoFormerForSequenceClassification, 'text-generation': TFRoFormerForCausalLM, 'token-classification': TFRoFormerForTokenClassification, 'zero-shot': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = TFRoFormerModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__a) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a) @slow def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''') self.assertIsNotNone(__a) @require_tf class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''') _UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]]) _UpperCamelCase = model(__a)[0] # TODO Replace vocab size _UpperCamelCase = 5_00_00 _UpperCamelCase = [1, 6, vocab_size] self.assertEqual(output.shape , __a) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. 
_UpperCamelCase = tf.constant( [ [ [-0.1205_3341, -1.026_4901, 0.2922_1946], [-1.513_3783, 0.19_7433, 0.1519_0607], [-5.013_5403, -3.90_0256, -0.8403_8764], ] ]) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4) @require_tf class _UpperCAmelCase( unittest.TestCase ): lowercase__ = 1E-4 def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = tf.constant([[4, 10]]) _UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6) _UpperCamelCase = emba(input_ids.shape) _UpperCamelCase = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]) tf.debugging.assert_near(__a , __a , atol=self.tolerance) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ]) _UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12) emba([2, 16, 5_12]) _UpperCamelCase = emba.weight[:3, :5] tf.debugging.assert_near(__a , __a , atol=self.tolerance) @require_tf class _UpperCAmelCase( unittest.TestCase ): lowercase__ = 1E-4 def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' # 2,12,16,64 _UpperCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa) , shape=(2, 12, 16, 64)) / 1_00 _UpperCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa) , shape=(2, 12, 16, 64)) / 1_00 _UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64) _UpperCamelCase = embed_positions([2, 16, 7_68])[None, None, :, :] _UpperCamelCase , _UpperCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __a , __a , __a) _UpperCamelCase = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ]) _UpperCamelCase = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ]) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __a , atol=self.tolerance) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __a , atol=self.tolerance)
19
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
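# A decoding-only usage sketch for the tokenizer defined above (it corresponds
# to the Speech2Text2 tokenizer in transformers; checkpoints without a merges
# file support decoding only). The ids passed to decode() are arbitrary and
# chosen purely for illustration:
from transformers import Speech2Text2Tokenizer

tok = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
print(tok.decode([4, 5, 6]))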
2
0
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


# The parameter names double as query parameters because locals() is
# forwarded to the API.
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
20
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
2
0
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) class __A ( UpperCamelCase__ ): UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self :Optional[int] , __snake_case :int="</s>" , __snake_case :List[Any]="<unk>" , __snake_case :Optional[int]="<pad>" , __snake_case :Any=1_25 , __snake_case :Optional[Any]=None , **__snake_case :Optional[int] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: __magic_name__ : Tuple =[f"<extra_id_{i}>" for i in range(__snake_case )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __magic_name__ : List[Any] =len(set(filter(lambda __snake_case : bool("""extra_id""" in str(__snake_case ) ) , __snake_case ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) __magic_name__ : Tuple =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token __magic_name__ : List[str] =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token __magic_name__ : int =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token super().__init__( eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , ) __magic_name__ : Union[str, Any] =extra_ids __magic_name__ : Tuple =2**8 # utf is 8 bits # define special tokens dict __magic_name__ : Dict[int, str] ={ self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __magic_name__ : Optional[int] =len(self.special_tokens_encoder ) __magic_name__ : Any =len(__snake_case ) for i, token in enumerate(__snake_case ): __magic_name__ : Union[str, Any] =self.vocab_size + i - n __magic_name__ : Dict[str, int] ={v: k for k, v in self.special_tokens_encoder.items()} @property def A__ ( self :Optional[Any] ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def A__ ( self :Tuple , __snake_case :List[int] , __snake_case :Optional[List[int]] = None , __snake_case :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__snake_case )) + [1] return ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1] def A__ ( self :Union[str, Any] , __snake_case :List[int] ): '''simple docstring''' if len(__snake_case ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def A__ ( self :Any , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ): '''simple docstring''' __magic_name__ : Union[str, Any] =[self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def A__ ( self :Union[str, Any] , __snake_case :List[int] , __snake_case :Optional[List[int]] = None ): '''simple docstring''' __magic_name__ : Optional[Any] =self._add_eos_if_not_present(__snake_case ) if token_ids_a is None: return token_ids_a else: __magic_name__ : List[str] =self._add_eos_if_not_present(__snake_case ) return token_ids_a + token_ids_a def A__ ( self :Tuple , __snake_case :str ): '''simple docstring''' __magic_name__ : Dict =[chr(__snake_case ) for i in text.encode("""utf-8""" )] return tokens def A__ ( self :int , __snake_case :List[str] ): '''simple docstring''' if token in self.special_tokens_encoder: __magic_name__ : Dict =self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __magic_name__ : int =self.added_tokens_encoder[token] elif len(__snake_case ) != 1: __magic_name__ : Optional[Any] =self.unk_token_id else: __magic_name__ : Any =ord(__snake_case ) + self._num_special_tokens return token_id def A__ ( self :Optional[Any] , __snake_case :int ): '''simple docstring''' if index in self.special_tokens_decoder: __magic_name__ : Any =self.special_tokens_decoder[index] else: __magic_name__ : int =chr(index - self._num_special_tokens ) return token def A__ ( self :Union[str, Any] , __snake_case :int ): '''simple docstring''' __magic_name__ : Any =B"""""" for token in tokens: if token in self.special_tokens_decoder: __magic_name__ : int =self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: __magic_name__ : Union[str, Any] =self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: __magic_name__ : str =token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: __magic_name__ : Optional[int] =token.encode("""utf-8""" ) else: __magic_name__ : Tuple =bytes([ord(__snake_case )] ) bstring += tok_string __magic_name__ : str =bstring.decode("""utf-8""" , errors="""ignore""" ) return string def A__ ( self :Tuple , __snake_case :str , __snake_case :Optional[str] = None ): '''simple docstring''' return ()
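# In the byte-level scheme above every UTF-8 byte maps to
# byte_value + number_of_special_tokens; with pad/eos/unk occupying ids 0-2,
# the letter "a" (byte 97) becomes id 100. A quick sketch, assuming the
# standard google/byt5-small checkpoint:
from transformers import ByT5Tokenizer

tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
ids = tok("abc").input_ids
assert ids == [100, 101, 102, 1]  # three shifted bytes plus </s> (id 1)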
21
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
0
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


# function to search the path
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if 0 <= xa < len(grid) and 0 <= ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
22
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Return every node reachable from ``start`` via iterative DFS."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
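# The comment in the function above lists the two differences from BFS; a
# breadth-first variant needs only a queue in place of the stack. Both
# traversals reach the same node set on this graph:
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # pop the *first* element instead of the last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored


assert breadth_first_search(G, "A") == depth_first_search(G, "A")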
2
0
import os
import time

import numpy as np
import onnxruntime as ort

# The three bare string constants below had their original variable names
# stripped in the source; they are kept verbatim rather than guessed.
FLAG_A = "1"
FLAG_B = "0"
FLAG_C = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
23
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
0
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _UpperCamelCase (_lowerCamelCase : Union[dict, list, tuple, torch.Tensor] )-> List[Tuple[int, ...]]: '''simple docstring''' __snake_case = [] if isinstance(_lowerCamelCase , _lowerCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(_lowerCamelCase ) ) elif isinstance(_lowerCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(_lowerCamelCase ) ) elif isinstance(_lowerCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : Tuple[int, ...] )-> Tuple[int, ...]: '''simple docstring''' __snake_case = [] for d in reversed(_lowerCamelCase ): idx.append(flat_idx % d ) __snake_case = flat_idx // d return tuple(reversed(_lowerCamelCase ) ) @torch.jit.ignore def _UpperCamelCase (_lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Optional[Sequence[bool]] = None , _lowerCamelCase : Optional[Sequence[bool]] = None , )-> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(_lowerCamelCase : List[bool] ) -> None: __snake_case = True for i in range(len(_lowerCamelCase ) ): __snake_case = -1 * (i + 1) l[reversed_idx] &= tally __snake_case = l[reversed_idx] if start_edges is None: __snake_case = [s == 0 for s in start] reduce_edge_list(_lowerCamelCase ) if end_edges is None: __snake_case = [e == (d - 1) for e, d in zip(_lowerCamelCase , _lowerCamelCase )] reduce_edge_list(_lowerCamelCase ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(_lowerCamelCase ) == 0: return [()] elif len(_lowerCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] __snake_case = [] __snake_case = [] # Dimensions common to start and end can be selected directly for s, e in zip(_lowerCamelCase , _lowerCamelCase ): if s == e: path_list.append(slice(_lowerCamelCase , s + 1 ) ) else: break __snake_case = tuple(_lowerCamelCase ) __snake_case = len(_lowerCamelCase ) # start == end, and we're done if divergence_idx == len(_lowerCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __snake_case = start[divergence_idx] return tuple( path + (slice(_lowerCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __snake_case = end[divergence_idx] return tuple( path + (slice(_lowerCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) __snake_case = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _UpperCamelCase (_lowerCamelCase : torch.Tensor , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> torch.Tensor: '''simple docstring''' __snake_case = t.shape[:no_batch_dims] __snake_case = list(_flat_idx_to_idx(_lowerCamelCase , _lowerCamelCase ) ) # _get_minimal_slice_set is inclusive __snake_case = list(_flat_idx_to_idx(flat_end - 1 , _lowerCamelCase ) ) # Get an ordered list of slices to perform __snake_case = _get_minimal_slice_set( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) __snake_case = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _UpperCamelCase (_lowerCamelCase : Callable , _lowerCamelCase : Dict[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : Any = None , _lowerCamelCase : bool = False , )-> Any: '''simple docstring''' if not (len(_lowerCamelCase ) > 0): raise ValueError('''Must provide at least one input''' ) __snake_case = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase )] __snake_case = tuple([max(_lowerCamelCase ) for s in zip(*_lowerCamelCase )] ) def _prep_inputs(_lowerCamelCase : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: __snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) __snake_case = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: __snake_case = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t __snake_case = tensor_tree_map(_prep_inputs , _lowerCamelCase ) __snake_case = None if _out is not None: __snake_case = tensor_tree_map(lambda _lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) __snake_case = 1 for d in orig_batch_dims: flat_batch_dim *= d __snake_case = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(_lowerCamelCase : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t __snake_case = 0 __snake_case = prepped_outputs for _ in range(_lowerCamelCase ): # Chunk the input if not low_mem: __snake_case = _select_chunk else: __snake_case = partial( _chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size ) , no_batch_dims=len(_lowerCamelCase ) , ) __snake_case = tensor_tree_map(_lowerCamelCase , _lowerCamelCase ) # Run the layer on the chunk __snake_case = layer(**_lowerCamelCase ) # Allocate space for the output if out is None: __snake_case = tensor_tree_map(lambda _lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _lowerCamelCase ) # Put the chunk in its pre-allocated space if isinstance(_lowerCamelCase , _lowerCamelCase ): def assign(_lowerCamelCase : dict , _lowerCamelCase : dict ) -> None: for k, v in da.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): assign(_lowerCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: __snake_case = da[k] assign(_lowerCamelCase , _lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): for xa, xa in zip(_lowerCamelCase , _lowerCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: __snake_case = xa elif isinstance(_lowerCamelCase , 
torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: __snake_case = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size __snake_case = tensor_tree_map(lambda _lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , _lowerCamelCase ) return out class lowerCAmelCase : def __init__( self , __SCREAMING_SNAKE_CASE = 512 , ) -> List[Any]: '''simple docstring''' __snake_case = max_chunk_size __snake_case = None __snake_case = None def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size __snake_case = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] __snake_case = [c for c in candidates if c > min_chunk_size] __snake_case = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__SCREAMING_SNAKE_CASE ) -> bool: try: with torch.no_grad(): fn(*__SCREAMING_SNAKE_CASE , chunk_size=__SCREAMING_SNAKE_CASE ) return True except RuntimeError: return False __snake_case = 0 __snake_case = len(__SCREAMING_SNAKE_CASE ) - 1 while i > min_viable_chunk_size_index: __snake_case = test_chunk_size(candidates[i] ) if not viable: __snake_case = (min_viable_chunk_size_index + i) // 2 else: __snake_case = i __snake_case = (i + len(__SCREAMING_SNAKE_CASE ) - 1) // 2 return candidates[min_viable_chunk_size_index] def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool: '''simple docstring''' __snake_case = True for aa, aa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert type(__SCREAMING_SNAKE_CASE ) == type(__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __snake_case = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )] __snake_case = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )] consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: consistent &= aa == aa return consistent def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int: '''simple docstring''' __snake_case = True __snake_case = tree_map(lambda __SCREAMING_SNAKE_CASE : a.shape if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) else a , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__SCREAMING_SNAKE_CASE ) __snake_case = self._compare_arg_caches(self.cached_arg_data , __SCREAMING_SNAKE_CASE ) else: # Otherwise, we can reuse the precomputed value __snake_case = False if not consistent: __snake_case = self._determine_favorable_chunk_size( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) __snake_case = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
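The chunk utilities above flatten all batch dimensions, run a layer over fixed-size slices of the flattened batch, and stitch the per-chunk outputs back together. A minimal runnable sketch of that idea follows; the names (chunked_apply, fn) are mine, not the obfuscated ones above, and it covers only the simple tensor-in/tensor-out case.

import torch

def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Run fn over slices of the leading (flattened batch) dimension and
    # concatenate the per-chunk outputs; mirrors the low-memory path above.
    outputs = []
    for i in range(0, x.shape[0], chunk_size):
        outputs.append(fn(x[i : i + chunk_size]))
    return torch.cat(outputs, dim=0)

# Example: apply a memory-hungry elementwise op 128 rows at a time.
x = torch.randn(1000, 64)
y = chunked_apply(lambda t: torch.softmax(t, dim=-1), x, chunk_size=128)
assert y.shape == x.shape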
24
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
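The class name above is obfuscated (`lowerCamelCase__`); assuming it corresponds to transformers.XLNetConfig, a minimal instantiation sketch shows the d_model/n_head divisibility check in action.

# Minimal sketch, assuming the obfuscated class above is
# transformers.XLNetConfig.
from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)  # d_model % n_head must be 0
print(config.d_model // config.n_head)         # per-head width: 64

# XLNetConfig(d_model=1024, n_head=10) would raise the ValueError above.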
2
0
def lowerCamelCase__ ( _a): return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = len(_a) # No of vertices in graph SCREAMING_SNAKE_CASE : str = [0] * n SCREAMING_SNAKE_CASE : str = [False] * n def dfs(_a , _a , _a , _a): SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : str = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(_a , _a , _a , id_) SCREAMING_SNAKE_CASE : List[Any] = min(low[at] , low[to]) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge SCREAMING_SNAKE_CASE : Tuple = min(low[at] , low[to]) SCREAMING_SNAKE_CASE : list[tuple[int, int]] = [] for i in range(_a): if not visited[i]: dfs(_a , -1 , _a , id_) return bridges if __name__ == "__main__": import doctest doctest.testmod()
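The DFS above is the classic low-link bridge search, but the corpus obfuscation rewrites subscript assignments (e.g. visited[at] = True) into bare name bindings, so the sample is not runnable as-is. A faithful, self-contained sketch with my own names:

def find_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    n = len(graph)
    visited = [False] * n
    low = [0] * n
    bridges: list[tuple[int, int]] = []
    counter = [0]  # mutable so the discovery id survives recursion

    def dfs(at: int, parent: int) -> None:
        visited[at] = True
        low[at] = counter[0]
        my_id = counter[0]
        counter[0] += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if low[to] > my_id:  # subtree cannot reach back above `at`
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], low[to])

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return bridges

# Triangle plus a tail: edges (2, 3) and (3, 4) are the bridges.
print(find_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2, 4], 4: [3]}))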
25
def SCREAMING_SNAKE_CASE_ ( _snake_case :bytes ) -> str: return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(_snake_case ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_snake_case ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
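Both helpers above are renamed to the same identifier by the obfuscation, so the decoder shadows the encoder. A round-trip sketch of the same pair with my own names:

def base16_encode(data: bytes) -> str:
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError("Data does not have an even number of hex digits.")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("Data is not uppercase hex.")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"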
2
0
'''simple docstring''' from __future__ import annotations import math def _a ( _lowerCamelCase , _lowerCamelCase ) -> float: """simple docstring""" __snake_case : Any = u for i in range(1 , _lowerCamelCase ): __snake_case : Optional[Any] = temp * (u - i) return temp def _a ( ) -> None: """simple docstring""" __snake_case : str = int(input("""enter the numbers of values: """ ) ) __snake_case : list[list[float]] = [] for _ in range(_lowerCamelCase ): y.append([] ) for i in range(_lowerCamelCase ): for j in range(_lowerCamelCase ): y[i].append(_lowerCamelCase ) __snake_case : Any = 0 print("""enter the values of parameters in a list: """ ) __snake_case : Dict = list(map(_lowerCamelCase , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(_lowerCamelCase ): __snake_case : Optional[int] = float(input() ) __snake_case : List[str] = int(input("""enter the value to interpolate: """ ) ) __snake_case : int = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , _lowerCamelCase ): for j in range(n - i ): __snake_case : List[Any] = y[j + 1][i - 1] - y[j][i - 1] __snake_case : Union[str, Any] = y[0][0] for i in range(1 , _lowerCamelCase ): summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase ) print(F'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
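The sample above reads its data interactively via input(). A non-interactive sketch of the same Newton forward-difference interpolation (names are mine), with a worked check against f(x) = x^2:

import math

def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, yi in enumerate(y0):
        table[i][0] = yi
    for j in range(1, n):                 # build the forward-difference table
        for i in range(n - j):
            table[i][j] = table[i + 1][j - 1] - table[i][j - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)             # u(u-1)...(u-j+1)
        total += u_term * table[0][j] / math.factorial(j)
    return total

# f(x) = x^2 sampled at 0..3; interpolating at 1.5 recovers 2.25.
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))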
26
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool: if not isinstance(_snake_case , _snake_case ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(_snake_case ) == 0: raise ValueError('''Input list must be a non empty list''' ) if len(_snake_case ) == 1: return True _A = series[1] - series[0] for index in range(len(_snake_case ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> float: if not isinstance(_snake_case , _snake_case ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(_snake_case ) == 0: raise ValueError('''Input list must be a non empty list''' ) _A = 0 for val in series: answer += val return answer / len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
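Here too the two obfuscated definitions share one identifier, so the mean helper shadows the validity check. A usage sketch with distinct names:

def is_arithmetic_series(series: list) -> bool:
    if len(series) < 2:
        return True
    diff = series[1] - series[0]
    return all(b - a == diff for a, b in zip(series, series[1:]))

def mean(series: list) -> float:
    return sum(series) / len(series)

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert mean([2, 4, 6]) == 4.0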
2
0
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _A = 0 # if input_string is "aba" than new_input_string become "a|b|a" _A = '' _A = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(_SCREAMING_SNAKE_CASE ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _A, _A = 0, 0 # length[i] shows the length of palindromic substring with center i _A = [1 for i in range(len(_SCREAMING_SNAKE_CASE ) )] # for each character in new_string find corresponding palindromic string _A = 0 for j in range(len(_SCREAMING_SNAKE_CASE ) ): _A = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(_SCREAMING_SNAKE_CASE ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _A = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _A = j - k + 1 # noqa: E741 _A = j + k - 1 # update max_length and start position if max_length < length[j]: _A = length[j] _A = j # create that string _A = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
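The sample above is Manacher's longest-palindromic-substring algorithm, but the obfuscated assignment names (the l/r window variables collapse into _A) make it raise NameError. A runnable sketch of the same routine:

def longest_palindrome(s: str) -> str:
    t = "|".join(s)                        # "aba" -> "a|b|a"
    length = [1] * len(t)
    l = r = 0                              # furthest palindrome seen so far
    best_len, best_center = 0, 0
    for j in range(len(t)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while j - k >= 0 and j + k < len(t) and t[j + k] == t[j - k]:
            k += 1
        length[j] = 2 * k - 1
        if j + k - 1 > r:                  # extends past the previous window
            l, r = j - k + 1, j + k - 1
        if length[j] > best_len:
            best_len, best_center = length[j], j
    half = best_len // 2
    return t[best_center - half : best_center + half + 1].replace("|", "")

assert longest_palindrome("cbbd") == "bb"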
27
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> qiskit.result.counts.Counts: if not isinstance(_snake_case , int ): raise TypeError('''number of qubits must be an integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate (>10).''' ) _A = QuantumRegister(_snake_case , '''qr''' ) _A = ClassicalRegister(_snake_case , '''cr''' ) _A = QuantumCircuit(_snake_case , _snake_case ) _A = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots _A = Aer.get_backend('''qasm_simulator''' ) _A = execute(_snake_case , _snake_case , shots=10_000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
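A compact sketch of the same quantum-Fourier-transform circuit construction, assuming a current qiskit install; only the basic gate calls h, cp, and swap are used, and no simulator is required to build or draw the circuit.

import math
from qiskit import QuantumCircuit

def qft_circuit(n: int) -> QuantumCircuit:
    qc = QuantumCircuit(n)
    for i in range(n):
        target = n - i - 1
        qc.h(target)
        for j in range(target):            # controlled phase rotations
            qc.cp(math.pi / 2 ** (target - j), j, target)
    for k in range(n // 2):                # reverse the qubit order
        qc.swap(k, n - k - 1)
    return qc

print(qft_circuit(3))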
2
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : List[str] = '''roformer''' def __init__( self, A=50_000, A=None, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=1_536, A=2, A=0.02, A=1E-12, A=0, A=False, A=True, **A, ): '''simple docstring''' super().__init__(pad_token_id=A, **A ) SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : Dict = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : Any = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : str = hidden_dropout_prob SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : List[str] = type_vocab_size SCREAMING_SNAKE_CASE : List[str] = initializer_range SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE : Tuple = rotary_value SCREAMING_SNAKE_CASE : str = use_cache class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def UpperCamelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'sequence'} SCREAMING_SNAKE_CASE : int = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
28
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
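A hypothetical invocation of the converter above; the script file name and all paths are placeholders, not taken from the source.

python convert_sew_checkpoint.py \
    --checkpoint_path checkpoints/sew_finetuned.pt \
    --dict_path data/dict.ltr.txt \
    --pytorch_dump_folder_path ./sew-hf \
    --is_finetuned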
2
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = torch.device("""cpu""") def lowercase ( ): lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw ) return im def lowercase ( lowerCAmelCase__ ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] ) def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = dct.pop(lowerCAmelCase__ ) lowerCamelCase_ = val def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = [] for k in state_dict.keys(): lowerCamelCase_ = k if ".pwconv" in k: lowerCamelCase_ = k_new.replace('''.pwconv''' ,'''.point_wise_conv''' ) if ".dwconv" in k: lowerCamelCase_ = k_new.replace('''.dwconv''' ,'''.depth_wise_conv''' ) if ".Proj." in k: lowerCamelCase_ = k_new.replace('''.Proj.''' ,'''.proj.''' ) if "patch_embed" in k_new: lowerCamelCase_ = k_new.replace('''patch_embed''' ,'''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: lowerCamelCase_ = k_new.split('''.''' ) if ls[2].isdigit(): lowerCamelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: lowerCamelCase_ = k_new.replace('''network''' ,'''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCamelCase_ = 1_000 lowerCamelCase_ = '''huggingface/label-files''' lowerCamelCase_ = '''imagenet-1k-id2label.json''' lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) ) lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCamelCase_ = [3, 3, 6, 4] lowerCamelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCamelCase_ = [3, 3, 9, 6] lowerCamelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCamelCase_ = [4, 3, 10, 5] lowerCamelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCamelCase_ = [4, 4, 12, 6] lowerCamelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' ,check_hash=lowerCAmelCase__ ) else: lowerCamelCase_ = torch.load(lowerCAmelCase__ 
,map_location='''cpu''' ) lowerCamelCase_ = checkpoint lowerCamelCase_ = create_rename_keys(lowerCAmelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # load HuggingFace model lowerCamelCase_ = SwiftFormerForImageClassification(lowerCAmelCase__ ).eval() hf_model.load_state_dict(lowerCAmelCase__ ) # prepare test inputs lowerCamelCase_ = prepare_img() lowerCamelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ) # compare outputs from both models lowerCamelCase_ = get_expected_output(lowerCAmelCase__ ) lowerCamelCase_ = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1_000] ) assert torch.allclose(hf_logits[0, 0:5] ,lowerCAmelCase__ ,atol=1E-3 ) Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") A_ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
29
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
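The slow tests above exercise the zero-shot image-classification pipeline end to end. A minimal usage sketch of the same call pattern; it downloads the openai/clip-vit-base-patch32 checkpoint and requires torch and Pillow.

from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "plane", "remote"],
)
print(preds)  # list of {"score": ..., "label": ...}, highest score first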
2
0
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __a( nn.Module ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 0.0 lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = jnp.floataa def a__ ( self ) -> int: UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Union[str, Any] = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : Tuple = FlaxResnetBlockaD( in_channels=_SCREAMING_SNAKE_CASE ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = resnets UpperCAmelCase_ : List[str] = attentions if self.add_downsample: UpperCAmelCase_ : Tuple = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> List[Any]: UpperCAmelCase_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): UpperCAmelCase_ : str = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : Any = self.downsamplers_a(_SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class __a( nn.Module ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 0.0 lowerCAmelCase = 1 lowerCAmelCase = True lowerCAmelCase = jnp.floataa def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = [] for i in range(self.num_layers ): UpperCAmelCase_ : Dict = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD( in_channels=_SCREAMING_SNAKE_CASE ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = resnets if self.add_downsample: UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Any: UpperCAmelCase_ : str = () for resnet in self.resnets: UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : Optional[Any] = self.downsamplers_a(_SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class __a( nn.Module ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 0.0 lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False 
lowerCAmelCase = jnp.floataa def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = resnets UpperCAmelCase_ : Dict = attentions if self.add_upsample: UpperCAmelCase_ : Tuple = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Dict: for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states UpperCAmelCase_ : int = res_hidden_states_tuple[-1] UpperCAmelCase_ : int = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[str] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) if self.add_upsample: UpperCAmelCase_ : Dict = self.upsamplers_a(_SCREAMING_SNAKE_CASE ) return hidden_states class __a( nn.Module ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 0.0 lowerCAmelCase = 1 lowerCAmelCase = True lowerCAmelCase = jnp.floataa def a__ ( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for i in range(self.num_layers ): UpperCAmelCase_ : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Any = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : int = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = resnets if self.add_upsample: UpperCAmelCase_ : List[str] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> str: for resnet in self.resnets: # pop res hidden states UpperCAmelCase_ : Optional[Any] = res_hidden_states_tuple[-1] UpperCAmelCase_ : List[str] = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[str] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Optional[Any] = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) if self.add_upsample: UpperCAmelCase_ : Tuple = self.upsamplers_a(_SCREAMING_SNAKE_CASE ) return hidden_states class __a( nn.Module ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 0.0 lowerCAmelCase = 1 lowerCAmelCase = 1 lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = 
jnp.floataa def a__ ( self ) -> Tuple: # there is always at least one resnet UpperCAmelCase_ : Any = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) ] UpperCAmelCase_ : int = [] for _ in range(self.num_layers ): UpperCAmelCase_ : Tuple = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = resnets UpperCAmelCase_ : Optional[Any] = attentions def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Any: UpperCAmelCase_ : int = self.resnets[0](_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): UpperCAmelCase_ : Optional[int] = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE ) return hidden_states
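The up blocks above pop the matching down-block activation off the residual stack and concatenate it on the channel axis before each resnet. A tiny shape-only sketch of that skip-connection pattern (arrays and shapes are illustrative, not from the source):

import jax.numpy as jnp

hidden = jnp.zeros((1, 8, 8, 64))            # current up-block activation
res_stack = (jnp.zeros((1, 8, 8, 32)),)      # saved down-block activations
skip, res_stack = res_stack[-1], res_stack[:-1]
merged = jnp.concatenate((hidden, skip), axis=-1)
print(merged.shape)                          # (1, 8, 8, 96)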
30
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
0
def UpperCAmelCase_ ( __UpperCAmelCase : int = 10 , __UpperCAmelCase : int = 10_00 , __UpperCAmelCase : bool = True ) -> int: assert ( isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' ) return min_val if option else max_val def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int: return int((number_a + number_a) / 2 ) def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> None: assert ( isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ) ), 'argument values must be type of "int"' if lower > higher: raise ValueError('argument value for lower and higher must be(lower > higher)' ) if not lower < to_guess < higher: raise ValueError( 'guess value must be within the range of lower and higher value' ) def answer(__UpperCAmelCase : int ) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print('started...' ) SCREAMING_SNAKE_CASE_ = lower SCREAMING_SNAKE_CASE_ = higher SCREAMING_SNAKE_CASE_ = [] while True: SCREAMING_SNAKE_CASE_ = get_avg(__UpperCAmelCase , __UpperCAmelCase ) last_numbers.append(__UpperCAmelCase ) if answer(__UpperCAmelCase ) == "low": SCREAMING_SNAKE_CASE_ = number elif answer(__UpperCAmelCase ) == "high": SCREAMING_SNAKE_CASE_ = number else: break print(f"guess the number : {last_numbers[-1]}" ) print(f"details : {last_numbers!s}" ) def UpperCAmelCase_ ( ) -> None: SCREAMING_SNAKE_CASE_ = int(input('Enter lower value : ' ).strip() ) SCREAMING_SNAKE_CASE_ = int(input('Enter high value : ' ).strip() ) SCREAMING_SNAKE_CASE_ = int(input('Enter value to guess : ' ).strip() ) guess_the_number(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
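The game above is an interactive binary search over the high/low feedback. A non-interactive sketch of the same loop, mirroring the midpoint update and the final printout:

low, high, target = 0, 1000, 355
guesses = []
while True:
    mid = (low + high) // 2                # same as get_avg above
    guesses.append(mid)
    if mid < target:
        low = mid
    elif mid > target:
        high = mid
    else:
        break
print(f"guess the number : {guesses[-1]}")  # 355
print(f"steps taken      : {len(guesses)}")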
31
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = "openai-gpt" a__ : Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]: _A = vocab_size _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = afn _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_first_dropout _A = summary_proj_to_labels super().__init__(**__lowerCAmelCase )
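As a hedged cross-check, this sketch assumes the class above mirrors transformers' upstream OpenAIGPTConfig, whose defaults match the signature shown:

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig()  # vocab_size=40478, n_positions=512 by default
print(config.n_embd, config.n_layer, config.n_head)  # 768 12 12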
2
0
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "T5Config" class __UpperCamelCase ( A__ ): __A : str = """mt5""" __A : Optional[Any] = MTaConfig class __UpperCamelCase ( A__ ): __A : Tuple = """mt5""" __A : List[str] = MTaConfig class __UpperCamelCase ( A__ ): __A : Optional[Any] = """mt5""" __A : int = MTaConfig
32
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
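A hedged usage sketch for the processor exercised above; the blank image is a stand-in, and the printed shape follows the default shortest_edge=800 / longest_edge=1333 resize:

from PIL import Image
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor()
image = Image.new("RGB", (640, 480))  # stand-in for a real photo
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])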
2
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __magic_name__ (unittest.TestCase ): '''simple docstring''' def __init__( self:Union[str, Any] , _a:Optional[int] , _a:Tuple=7 , _a:Dict=3 , _a:Optional[Any]=18 , _a:Optional[Any]=30 , _a:Union[str, Any]=4_00 , _a:str=True , _a:Optional[Any]=32 , _a:Tuple=True , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size_divisor snake_case__ = do_rescale def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __magic_name__ (snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = GLPNImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = GLPNImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , '''do_resize''' ) ) self.assertTrue(hasattr(_a , '''size_divisor''' ) ) self.assertTrue(hasattr(_a , '''resample''' ) ) self.assertTrue(hasattr(_a , '''do_rescale''' ) ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): pass def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) 
# Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
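A hedged usage sketch matching the divisibility assertions above (size_divisor=32 is an assumption mirroring the tester default):

from PIL import Image
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor(size_divisor=32)
image = Image.new("RGB", (401, 299))  # deliberately not multiples of 32
pixel_values = processor(images=image, return_tensors="pt").pixel_values
# height and width are rounded down to multiples of size_divisor
assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0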
33
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
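With the names restored, a minimal check of the one-pass, three-way partition:

assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []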
2
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class snake_case_ : """simple docstring""" @staticmethod def UpperCAmelCase__ ( *lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]: pass @is_pipeline_test @require_torch @require_vision class snake_case_ ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCamelCase = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase = vqa_pipeline(lowerCamelCase_ , top_k=1) self.assertEqual( lowerCamelCase_ , [ [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}], [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}], ] , ) @require_torch def UpperCAmelCase__ ( self) -> Union[str, Any]: UpperCamelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''') UpperCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCamelCase = '''How many cats are there?''' UpperCamelCase = vqa_pipeline(image=lowerCamelCase_ , question='''How many cats are there?''' , top_k=2) self.assertEqual( lowerCamelCase_ , [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}, {'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}]) UpperCamelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( lowerCamelCase_ , [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}, {'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}]) @slow @require_torch def UpperCAmelCase__ ( self) -> Optional[int]: UpperCamelCase = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''') UpperCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCamelCase = '''How many cats are there?''' UpperCamelCase = vqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]) UpperCamelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]) UpperCamelCase = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question 
answering not implemented in TF''') def UpperCAmelCase__ ( self) -> Optional[int]: pass
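A hedged sketch of the pipeline these tests exercise; the checkpoint and fixture path are taken from the sample and must exist locally for this to run:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
result = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(result)  # e.g. [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]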
34
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
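Sanity checks for the restored Project Euler solution (104743 is the well-known answer for the 10001st prime):

assert solution(6) == 13      # the sixth prime
assert solution() == 104_743  # the 10001st prime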
2
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ :List[str] = logging.get_logger(__name__) a_ :List[str] = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class lowercase ( _UpperCAmelCase ): lowerCamelCase : Optional[Any] = '''owlvit_text_model''' def __init__( self : List[str] , _lowercase : Optional[Any]=4_94_08 , _lowercase : Tuple=5_12 , _lowercase : List[str]=20_48 , _lowercase : List[Any]=12 , _lowercase : Any=8 , _lowercase : int=16 , _lowercase : int="quick_gelu" , _lowercase : Any=1E-5 , _lowercase : Union[str, Any]=0.0 , _lowercase : Union[str, Any]=0.02 , _lowercase : List[Any]=1.0 , _lowercase : List[str]=0 , _lowercase : List[Any]=4_94_06 , _lowercase : Dict=4_94_07 , **_lowercase : Tuple , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_dropout SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : int = initializer_factor @classmethod def lowercase__ ( cls : Any , _lowercase : Union[str, os.PathLike] , **_lowercase : Union[str, Any] ): cls._set_token_in_kwargs(_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = cls.get_config_dict(_lowercase , **_lowercase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": SCREAMING_SNAKE_CASE__ : Dict = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_lowercase , **_lowercase ) class lowercase ( _UpperCAmelCase ): lowerCamelCase : str = '''owlvit_vision_model''' def __init__( self : Any , _lowercase : Dict=7_68 , _lowercase : List[str]=30_72 , _lowercase : Tuple=12 , _lowercase : Any=12 , _lowercase : Any=3 , _lowercase : Any=7_68 , _lowercase : Optional[int]=32 , _lowercase : Dict="quick_gelu" , _lowercase : Union[str, Any]=1E-5 , _lowercase : Optional[int]=0.0 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1.0 , **_lowercase : List[Any] , ): super().__init__(**_lowercase ) SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = num_channels SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : str = patch_size SCREAMING_SNAKE_CASE__ : int = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Any = attention_dropout SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor @classmethod def lowercase__ ( cls : Any , _lowercase : Union[str, os.PathLike] , **_lowercase : Union[str, Any] ): cls._set_token_in_kwargs(_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = cls.get_config_dict(_lowercase , **_lowercase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": SCREAMING_SNAKE_CASE__ : str = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_lowercase , **_lowercase ) class lowercase ( _UpperCAmelCase ): lowerCamelCase : str = '''owlvit''' lowerCamelCase : int = True def __init__( self : Union[str, Any] , _lowercase : Dict=None , _lowercase : List[str]=None , _lowercase : Union[str, Any]=5_12 , _lowercase : Dict=2.6592 , _lowercase : Union[str, Any]=True , **_lowercase : List[str] , ): super().__init__(**_lowercase ) if text_config is None: SCREAMING_SNAKE_CASE__ : List[Any] = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = {} logger.info('''vision_config is None. 
initializing the OwlViTVisionConfig with default values.''' ) SCREAMING_SNAKE_CASE__ : Any = OwlViTTextConfig(**_lowercase ) SCREAMING_SNAKE_CASE__ : int = OwlViTVisionConfig(**_lowercase ) SCREAMING_SNAKE_CASE__ : Any = projection_dim SCREAMING_SNAKE_CASE__ : Dict = logit_scale_init_value SCREAMING_SNAKE_CASE__ : Union[str, Any] = return_dict SCREAMING_SNAKE_CASE__ : Any = 1.0 @classmethod def lowercase__ ( cls : List[Any] , _lowercase : Union[str, os.PathLike] , **_lowercase : Dict ): cls._set_token_in_kwargs(_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(_lowercase , **_lowercase ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_lowercase , **_lowercase ) @classmethod def lowercase__ ( cls : int , _lowercase : Dict , _lowercase : Dict , **_lowercase : List[str] ): SCREAMING_SNAKE_CASE__ : str = {} SCREAMING_SNAKE_CASE__ : List[str] = text_config SCREAMING_SNAKE_CASE__ : Tuple = vision_config return cls.from_dict(_lowercase , **_lowercase ) def lowercase__ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ : str = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_config.to_dict() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vision_config.to_dict() SCREAMING_SNAKE_CASE__ : int = self.__class__.model_type return output class lowercase ( _UpperCAmelCase ): @property def lowercase__ ( self : int ): return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def lowercase__ ( self : Dict ): return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def lowercase__ ( self : Any ): return 1E-4 def lowercase__ ( self : Tuple , _lowercase : "ProcessorMixin" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : Optional["TensorType"] = None , ): SCREAMING_SNAKE_CASE__ : Optional[int] = super().generate_dummy_inputs( processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=_lowercase , framework=_lowercase ) return {**text_input_dict, **image_input_dict} @property def lowercase__ ( self : int ): return 14
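A hedged sketch of composing the nested configs above; per the classmethod shown, the composition helper expects plain dicts, so the sub-configs are serialized first:

from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_cfg = OwlViTTextConfig()           # hidden_size=512 by default
vision_cfg = OwlViTVisionConfig(patch_size=32)
config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(config.projection_dim)            # 512 by default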
35
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
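To illustrate the backend-detection idea used above, a minimal standalone run of a simplified version of the same regex (re-declared here so the snippet is self-contained):

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available")
line = "if not is_torch_available() or not is_vision_available():"
backends = sorted(_re_backend.findall(line))
print("_and_".join(backends))  # torch_and_vision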
2
0
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
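A quick check of the restored sort:

assert gnome_sort([5, 3, 4, 1, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []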
36
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
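A hedged end-to-end sketch of the pipeline defined above; the checkpoint name is an assumption (any image-captioning checkpoint would do):

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# e.g. [{'generated_text': 'two cats laying on a couch'}]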
2
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[str] = logging.get_logger(__name__) def UpperCamelCase_ ( __a , __a=False ) -> str: a__ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" a__ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def UpperCamelCase_ ( __a , __a , __a=False ) -> List[str]: for i in range(config.num_hidden_layers ): if base_model: a__ : Union[str, Any] = "" else: a__ : str = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a__ : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) a__ : Any = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a__ : List[str] = in_proj_weight[ : config.hidden_size, : ] a__ : List[Any] = in_proj_bias[: config.hidden_size] a__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a__ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a__ : str = in_proj_weight[ -config.hidden_size :, : ] a__ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Union[str, Any] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__a , __a ) def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : Optional[int] = dct.pop(__a ) a__ : str = val def UpperCamelCase_ ( ) -> Dict: a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : Dict = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( __a , __a , __a=True ) -> str: a__ : Tuple = ViTConfig() # patch_size if model_name[-1] == "8": a__ : Any = 8 # set labels if required if not base_model: a__ : str = 1_000 a__ : Tuple = "huggingface/label-files" a__ : int = "imagenet-1k-id2label.json" a__ : Dict = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : List[Any] = {int(__a ): v for k, v in idalabel.items()} a__ : Tuple = idalabel a__ : Any = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: a__ : Dict = 384 a__ : Tuple = 1_536 a__ : str = 12 a__ : Union[str, Any] = 6 # load original model from torch hub a__ : List[Any] = torch.hub.load("facebookresearch/dino:main" , __a ) original_model.eval() # load state_dict of original model, remove and rename some keys a__ : Dict = original_model.state_dict() if base_model: remove_classification_head_(__a ) a__ : Any = create_rename_keys(__a , base_model=__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_q_k_v(__a , __a , __a ) # load HuggingFace model if base_model: a__ : List[str] = ViTModel(__a , add_pooling_layer=__a ).eval() else: a__ : List[str] = ViTForImageClassification(__a ).eval() model.load_state_dict(__a ) # Check outputs on an image, prepared by ViTImageProcessor a__ : Optional[int] = ViTImageProcessor() a__ : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) a__ : List[str] = encoding["pixel_values"] a__ : List[Any] = model(__a ) if base_model: a__ : List[Any] = original_model(__a ) assert torch.allclose(__a , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: a__ : List[str] = original_model(__a ) assert logits.shape == outputs.logits.shape assert torch.allclose(__a , outputs.logits , atol=1e-3 ) Path(__a ).mkdir(exist_ok=__a ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__a ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) 
parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) UpperCamelCase : int = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
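A hedged command-line invocation of the conversion script above (the file name is illustrative; the flags match its argparse block, and base_model already defaults to True):

python convert_dino_checkpoint.py --model_name dino_vitb16 --pytorch_dump_folder_path ./converted_dino_vitb16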
37
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    # NOTE: scrapes Yahoo Finance; the CSS class below is brittle and may change.
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
2
0
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") A_ : Tuple = logging.getLogger(__name__) @dataclass class __snake_case : '''simple docstring''' lowerCamelCase__ = field( default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) lowerCamelCase__ = field( default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , ) lowerCamelCase__ = field( default=1_024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''A csv or a json file containing the training data.'''} ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) lowerCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''A csv or a json file containing the test data.'''} ) def __UpperCamelCase ( self ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: snake_case__ : str = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." snake_case__ : List[str] = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __snake_case : '''simple docstring''' lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) lowerCamelCase__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCamelCase__ = field( default=__SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def UpperCamelCase__ ( ) -> int: '''simple docstring''' snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case__ , snake_case__ , snake_case__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case__ , snake_case__ , snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) snake_case__ : str = training_args.get_process_log_level() logger.setLevel(__magic_name__ ) datasets.utils.logging.set_verbosity(__magic_name__ ) transformers.utils.logging.set_verbosity(__magic_name__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. snake_case__ : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case__ : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. snake_case__ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. snake_case__ : List[str] = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: snake_case__ : int = data_args.train_file.split(""".""" )[-1] snake_case__ : int = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." snake_case__ : List[str] = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files snake_case__ : int = load_dataset("""csv""" , data_files=__magic_name__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files snake_case__ : Tuple = load_dataset("""json""" , data_files=__magic_name__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels snake_case__ : Dict = raw_datasets["""train"""].features["""label"""].names snake_case__ : Optional[Any] = len(__magic_name__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case__ : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer snake_case__ : Dict = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__magic_name__ , ) snake_case__ : Tuple = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: snake_case__ : Dict = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch snake_case__ : Dict = False # Some models have set the order of the labels to use, so let's make sure we do use it. snake_case__ : Union[str, Any] = {"""Refused""": 0, """Entailed""": 1} snake_case__ : List[str] = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) snake_case__ : int = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__magic_name__ : Union[str, Any] ): # Tokenize the texts def _convert_table_text_to_pandas(__magic_name__ : int ): snake_case__ : List[Any] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] snake_case__ : Optional[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd snake_case__ : Optional[int] = examples["""statement"""] snake_case__ : Dict = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) snake_case__ : Optional[Any] = tokenizer(__magic_name__ , __magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ ) snake_case__ : Tuple = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): snake_case__ : List[str] = raw_datasets.map( __magic_name__ , batched=__magic_name__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) snake_case__ : Optional[int] = raw_datasets["""train"""] if data_args.max_train_samples is not None: snake_case__ : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) snake_case__ : int = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: snake_case__ : Any = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) snake_case__ : List[Any] = raw_datasets["""test"""] if data_args.max_predict_samples is not None: snake_case__ : Dict = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__magic_name__ ) ) , 3 ): logger.info(f"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__magic_name__ : EvalPrediction ): snake_case__ : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __magic_name__ ) else p.predictions snake_case__ : Union[str, Any] = np.argmax(__magic_name__ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: snake_case__ : Union[str, Any] = default_data_collator elif training_args.fpaa: snake_case__ : Optional[int] = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) else: snake_case__ : str = None # Initialize our Trainer snake_case__ : str = Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: snake_case__ : Optional[Any] = None if training_args.resume_from_checkpoint is not None: snake_case__ : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case__ : Optional[int] = last_checkpoint snake_case__ : Any = trainer.train(resume_from_checkpoint=__magic_name__ ) snake_case__ : str = train_result.metrics snake_case__ : Dict = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__magic_name__ ) ) snake_case__ : Any = min(__magic_name__ , len(__magic_name__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __magic_name__ ) trainer.save_metrics("""train""" , __magic_name__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) snake_case__ : Optional[Any] = trainer.evaluate(eval_dataset=__magic_name__ ) snake_case__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__magic_name__ ) snake_case__ : List[str] = min(__magic_name__ , len(__magic_name__ ) ) trainer.log_metrics("""eval""" , __magic_name__ ) trainer.save_metrics("""eval""" , __magic_name__ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
snake_case__ : Dict = predict_dataset.remove_columns("""label""" ) snake_case__ : str = trainer.predict(__magic_name__ , metric_key_prefix="""predict""" ).predictions snake_case__ : str = np.argmax(__magic_name__ , axis=1 ) snake_case__ : Tuple = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__magic_name__ , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__magic_name__ ): snake_case__ : List[str] = label_list[item] writer.write(f"{index}\t{item}\n" ) snake_case__ : Union[str, Any] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**__magic_name__ ) else: trainer.create_model_card(**__magic_name__ ) def UpperCamelCase__ ( __magic_name__ : str ) -> List[str]: '''simple docstring''' main() if __name__ == "__main__": main()
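A quick illustration of the table linearization used in preprocess_tabfact_function above: rows of the `table_text` field are separated by newlines and cells by `#`, with the first row taken as the header. A minimal, self-contained sketch follows; the sample table is invented for illustration and is not from the TabFact data.

import pandas as pd

def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Rows are separated by newlines, cells within a row by '#'.
    table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    # The first row is the header; the remaining rows are records.
    return pd.DataFrame.from_records(table_content[1:], columns=table_content[0])

if __name__ == "__main__":
    sample = "player#team\nmessi#psg\nkane#spurs"
    print(convert_table_text_to_pandas(sample))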
38
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Kruskal may emit the MST edges in any order, so compare sorted lists.
    assert sorted(result) == sorted(expected)
2
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase_ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
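The `_LazyModule` wiring above defers the heavy model imports until an attribute is first accessed. A minimal sketch of the underlying idea (a module-level `__getattr__` per PEP 562) is below; it is a simplified illustration, not the real transformers implementation, and it uses `json.decoder` as a stand-in target module.

import importlib

_import_structure = {"decoder": ["JSONDecoder"]}  # submodule -> exported names
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str):
    # Triggered only when `name` is not already defined at module level.
    if name in _attr_to_module:
        submodule = importlib.import_module(f"json.{_attr_to_module[name]}")
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")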
39
def aliquot_sum(input_num: int) -> int:
    """Return the aliquot sum of ``input_num``: the sum of its proper divisors."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
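A quick sanity check for the aliquot sum above; the values are worked out by hand and are not part of the original file.

# proper divisors of 28: 1, 2, 4, 7, 14 -> sum 28 (a perfect number)
# proper divisors of 12: 1, 2, 3, 4, 6  -> sum 16
assert aliquot_sum(28) == 28
assert aliquot_sum(12) == 16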
2
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
40
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
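The rolling-hash update above treats each window of the text as a base-256 number reduced mod `modulus`: sliding the window right subtracts the leading character's contribution and appends the next character. A tiny worked sketch with toy parameters, chosen only for illustration (the module itself uses base 256 and modulus 1_000_003):

base, mod = 256, 101
width = 2

# Hash of the first window "ab".
h = (ord("a") * base + ord("b")) % mod
# Slide one position: drop 'a' (weighted by base**(width - 1)), append 'c' -> "bc".
h = ((h - ord("a") * pow(base, width - 1, mod)) * base + ord("c")) % mod
assert h == (ord("b") * base + ord("c")) % mod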
2
0
'''simple docstring'''
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check if a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient of Hermitian matrix ``a`` and vector ``v``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
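For reference, the quantity computed above is the Rayleigh quotient R(M, v) = (v* M v) / (v* v), which equals the eigenvalue when v is an eigenvector of the Hermitian matrix M. A small numeric check follows; the matrix and eigenvector are picked by hand for illustration.

import numpy as np

# v = (1, 1) is an eigenvector of [[2, 1], [1, 2]] with eigenvalue 3.
m = np.array([[2, 1], [1, 2]])
v = np.array([[1], [1]])
assert is_hermitian(m)
assert rayleigh_quotient(m, v) == 3.0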
41
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
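Decoding in this tokenizer joins tokens with spaces and then removes every "@@ " continuation marker, which glues a subword onto the token that follows it. A hedged sketch of that merge step; the token sequence is invented, not from a real checkpoint.

BPE_TOKEN_VOCAB = "@@ "  # the continuation marker defined at the top of the module

def merge_bpe_tokens(tokens: list[str]) -> str:
    joined = " ".join(tokens)
    # Removing "@@ " concatenates a subword with the following token.
    return "".join(joined.split(BPE_TOKEN_VOCAB))

assert merge_bpe_tokens(["hel@@", "lo", "world"]) == "hello world"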
2
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ = logging.get_logger(__name__) A_ = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = 'nat' SCREAMING_SNAKE_CASE_ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE_=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = depths lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = num_heads lowerCamelCase_ = kernel_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase_ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) ) lowerCamelCase_ = layer_scale_init_value lowerCamelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )] lowerCamelCase_ ,lowerCamelCase_ = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
42
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
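An end-to-end usage sketch for the Prim's implementation above. The identifiers in the listing appear under rewritten names, so this sketch assumes the upstream ones: GraphUndirectedWeighted for the graph class and prims_algo for the algorithm function; treat both as assumptions, not confirmed API.

graph = GraphUndirectedWeighted()       # assumed name of the graph class above
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("a", "c", 15)
dist, parent = prims_algo(graph)        # assumed name of the function above
# dist maps each node to its distance in the tree, parent to its MST predecessor.
print(dist, parent)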
2
0
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class _a ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: Any ) -> str: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModel.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: Tuple ) -> Optional[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForPreTraining.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[Any] ) -> Tuple: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) lowercase__ , lowercase__ = 
AutoModelForCausalLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: Dict ) -> Union[str, Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: int ) -> int: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , 
UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: str ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = AutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 ) lowercase__ = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_pt=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 ) lowercase__ = AutoModelWithLMHead.from_pretrained(UpperCamelCase_ , from_tf=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 14_410 )
43
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
0
'''simple docstring'''


def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
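A worked trace of the function above for -5: bin(-5)[3:] is "101", so the width is 3; abs(-5) - (1 << 3) = -3, whose bin()[3:] is "11"; prefixing "1" and zero-padding yields "1011", i.e. -5 in 4-bit two's complement. A few hand-verified checks:

assert twos_complement(-1) == "0b1"
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"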
44
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
2
0
import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : str = """data2vec-audio""" def __init__( self :List[Any] , lowerCamelCase__ :List[str]=32 , lowerCamelCase__ :Optional[Any]=7_68 , lowerCamelCase__ :int=12 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :List[str]=30_72 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :List[str]=0.1 , lowerCamelCase__ :Dict=0.1 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :List[str]=1e-5 , lowerCamelCase__ :Union[str, Any]="gelu" , lowerCamelCase__ :Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ :int=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ :Dict=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :List[Any]=16 , lowerCamelCase__ :Union[str, Any]=19 , lowerCamelCase__ :int=5 , lowerCamelCase__ :Dict=0.05 , lowerCamelCase__ :Union[str, Any]=10 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :Dict=10 , lowerCamelCase__ :List[str]=0 , lowerCamelCase__ :Dict="sum" , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :int=False , lowerCamelCase__ :Dict=2_56 , lowerCamelCase__ :Any=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCamelCase__ :int=(5, 3, 3, 1, 1) , lowerCamelCase__ :Union[str, Any]=(1, 2, 3, 1, 1) , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=0 , lowerCamelCase__ :str=1 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :Optional[Any]=3 , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=None , **lowerCamelCase__ :Any , ): super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ ) UpperCamelCase__ :int = hidden_size UpperCamelCase__ :int = feat_extract_activation UpperCamelCase__ :str = list(lowerCamelCase__ ) UpperCamelCase__ :Tuple = list(lowerCamelCase__ ) UpperCamelCase__ :List[Any] = list(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = conv_bias UpperCamelCase__ :Any = num_conv_pos_embeddings UpperCamelCase__ :Optional[Any] = num_conv_pos_embedding_groups UpperCamelCase__ :Optional[Any] = conv_pos_kernel_size UpperCamelCase__ :int = len(self.conv_dim ) UpperCamelCase__ :List[str] = num_hidden_layers UpperCamelCase__ :List[Any] = intermediate_size UpperCamelCase__ :Any = hidden_act UpperCamelCase__ :List[Any] = num_attention_heads UpperCamelCase__ :int = hidden_dropout UpperCamelCase__ :Dict = attention_dropout UpperCamelCase__ :Any = activation_dropout UpperCamelCase__ :int = feat_proj_dropout UpperCamelCase__ :List[Any] = final_dropout UpperCamelCase__ :List[Any] = layerdrop UpperCamelCase__ :List[Any] = layer_norm_eps UpperCamelCase__ :Any = initializer_range UpperCamelCase__ :Any = vocab_size UpperCamelCase__ :str = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for 
convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase__ :Tuple = mask_time_prob UpperCamelCase__ :Union[str, Any] = mask_time_length UpperCamelCase__ :Any = mask_time_min_masks UpperCamelCase__ :Tuple = mask_feature_prob UpperCamelCase__ :Optional[Any] = mask_feature_length UpperCamelCase__ :Tuple = mask_feature_min_masks # ctc loss UpperCamelCase__ :List[str] = ctc_loss_reduction UpperCamelCase__ :Optional[int] = ctc_zero_infinity # adapter UpperCamelCase__ :Optional[int] = add_adapter UpperCamelCase__ :List[str] = adapter_kernel_size UpperCamelCase__ :Dict = adapter_stride UpperCamelCase__ :Optional[Any] = num_adapter_layers UpperCamelCase__ :Union[str, Any] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCamelCase__ :Any = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCamelCase__ :List[str] = list(lowerCamelCase__ ) UpperCamelCase__ :int = list(lowerCamelCase__ ) UpperCamelCase__ :List[str] = list(lowerCamelCase__ ) UpperCamelCase__ :Union[str, Any] = xvector_output_dim @property def __a ( self :Optional[Any] ): return math.prod(self.conv_stride )
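The property at the end gives the feature encoder's total downsampling factor, the product of the convolutional strides. With the defaults above it works out to 320, i.e. one encoder frame per 320 waveform samples (20 ms at a 16 kHz sampling rate):

import math

strides = (5, 2, 2, 2, 2, 2, 2)  # conv_stride defaults from the config above
assert math.prod(strides) == 320
assert 320 / 16_000 == 0.02  # seconds per frame at 16 kHz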
45
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
0
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = [] embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", F"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", F"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", F"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", F"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Optional[Any] = [] attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( 
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( 
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Tuple = [] token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") ) return token def lowerCamelCase_( ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = 1000 _lowerCamelCase : str = "huggingface/label-files" _lowerCamelCase : List[Any] = num_labels _lowerCamelCase : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) ) _lowerCamelCase : Optional[int] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Any = idalabel _lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()} _lowerCamelCase : List[str] = CvtConfig(num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": _lowerCamelCase : List[str] = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": _lowerCamelCase : Dict = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: _lowerCamelCase : Tuple = [2, 2, 20] _lowerCamelCase : List[str] = [3, 12, 16] _lowerCamelCase : Optional[Any] = [192, 768, 1024] _lowerCamelCase : Union[str, Any] = CvtForImageClassification(_lowerCamelCase ) _lowerCamelCase : Any = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) _lowerCamelCase : Any = OrderedDict() _lowerCamelCase : Dict = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: _lowerCamelCase : Tuple = list_of_state_dict + cls_token(_lowerCamelCase ) _lowerCamelCase : Optional[int] = list_of_state_dict + embeddings(_lowerCamelCase ) for cnt in range(config.depth[idx] ): _lowerCamelCase : Dict = list_of_state_dict + attention(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) ): _lowerCamelCase : Optional[Any] = original_weights[list_of_state_dict[i][1]] 
model.load_state_dict(_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowerCAmelCase : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
46
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
2
0
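The CvT conversion script above reduces to building a list of (new_name, old_name) pairs and copying tensors across under the new keys. A minimal sketch of that general pattern, with made-up layer names purely for illustration:

from collections import OrderedDict

import torch

# Hypothetical rename table in the same (new_name, old_name) format the script uses.
rename_pairs = [
    ("encoder.layer0.weight", "stage0.block0.weight"),
    ("encoder.layer0.bias", "stage0.block0.bias"),
]

# Stand-in for a checkpoint that would normally come from torch.load(...).
original_weights = {
    "stage0.block0.weight": torch.randn(4, 4),
    "stage0.block0.bias": torch.randn(4),
}

new_state_dict = OrderedDict()
for new_name, old_name in rename_pairs:
    new_state_dict[new_name] = original_weights[old_name]

print(list(new_state_dict))  # keys now follow the target model's naming scheme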
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''allenai/led-base-16384''': 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def UpperCAmelCase__ ( ): __a : List[str] = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) __a : str = bs[:] __a : List[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCamelCase_ ) cs.append(2**8 + n ) n += 1 __a : Dict = [chr(lowerCamelCase_ ) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ): __a : Optional[Any] = set() __a : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __a : Dict = char return pairs class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple="replace" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : int="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE__ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ): '''simple docstring''' __a : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token __a : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token __a : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token __a : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token __a : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) else unk_token __a : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __a : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as vocab_handle: __a : List[Any] = json.load(SCREAMING_SNAKE_CASE__ ) __a : List[str] = {v: k for k, v in self.encoder.items()} __a : Dict = errors # how to handle errors in decoding __a : List[Any] = bytes_to_unicode() __a : List[Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as merges_handle: __a : str = merges_handle.read().split('\n' )[1:-1] __a : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __a : str = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __a : Tuple = {} __a : Union[str, Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __a : Optional[Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] __a : List[Any] = tuple(SCREAMING_SNAKE_CASE__ ) __a : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: return token while True: __a : Tuple = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __a , __a : str = bigram __a : Optional[Any] = [] __a : List[str] = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __a : str = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __a : List[Any] = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __a : Optional[int] = tuple(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __a : Dict = get_pairs(SCREAMING_SNAKE_CASE__ ) __a : Tuple = ' '.join(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = word return word def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' __a : int = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ): __a : Optional[Any] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding 
control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(' ' ) ) return bpe_tokens def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' __a : Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : int = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __a : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '\n' ) __a : List[Any] = 0 with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' 
) __a : Optional[int] = token_index writer.write(' '.join(SCREAMING_SNAKE_CASE__ ) + '\n' ) index += 1 return vocab_file, merge_file def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a : Union[str, Any] = [self.cls_token_id] __a : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' __a : Any = [self.sep_token_id] __a : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' __a : Dict = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()): __a : Tuple = ' ' + text return (text, kwargs) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Dict[str, EncodedInput], BatchEncoding] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ): '''simple docstring''' __a : Union[str, Any] = super()._pad( encoded_inputs=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) # Load from model defaults if return_attention_mask is None: __a : Tuple = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __a : Optional[Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. __a : int = len(encoded_inputs['global_attention_mask'] ) != len(SCREAMING_SNAKE_CASE__ ) if needs_to_be_padded: __a : Any = len(SCREAMING_SNAKE_CASE__ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __a : Optional[Any] = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": __a : List[str] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
47
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase Base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
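Assuming the two Base16 helpers above are importable, a round-trip sanity check could look like the following; the standard library's base64.b16encode serves as a reference implementation:

import base64

payload = b"Hello World!"
encoded = base16_encode(payload)  # defined above; yields "48656C6C6F20576F726C6421"
assert encoded == base64.b16encode(payload).decode("ascii")
assert base16_decode(encoded) == payload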
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class A ( SCREAMING_SNAKE_CASE__ ): def __get__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any]=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) lowerCAmelCase__ = "__cached_" + self.fget.__name__ lowerCAmelCase__ = getattr(__magic_name__ , __magic_name__ , __magic_name__ ) if cached is None: lowerCAmelCase__ = self.fget(__magic_name__ ) setattr(__magic_name__ , __magic_name__ , __magic_name__ ) return cached def A ( UpperCamelCase_ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def A ( UpperCamelCase_ : List[Any] ) -> Any: '''simple docstring''' if is_torch_fx_proxy(UpperCamelCase_ ): return True if is_torch_available(): import torch if isinstance(UpperCamelCase_ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCamelCase_ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCamelCase_ , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCamelCase_ , np.ndarray ) def A ( UpperCamelCase_ : Any ) -> Any: '''simple docstring''' return isinstance(UpperCamelCase_ , np.ndarray ) def A ( UpperCamelCase_ : str ) -> Union[str, Any]: '''simple docstring''' return _is_numpy(UpperCamelCase_ ) def A ( UpperCamelCase_ : Union[str, Any] ) -> Dict: '''simple docstring''' import torch return isinstance(UpperCamelCase_ , torch.Tensor ) def A ( UpperCamelCase_ : str ) -> Optional[Any]: '''simple docstring''' return False if not is_torch_available() else _is_torch(UpperCamelCase_ ) def A ( UpperCamelCase_ : Union[str, Any] ) -> str: '''simple docstring''' import torch return isinstance(UpperCamelCase_ , torch.device ) def A ( UpperCamelCase_ : Any ) -> str: '''simple docstring''' return False if not is_torch_available() else _is_torch_device(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' import torch if isinstance(UpperCamelCase_ , UpperCamelCase_ ): if hasattr(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ) else: return False return isinstance(UpperCamelCase_ , torch.dtype ) def A ( UpperCamelCase_ : int ) -> Any: '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase_ ) def A ( UpperCamelCase_ : str ) -> Tuple: '''simple docstring''' import tensorflow as tf return isinstance(UpperCamelCase_ , tf.Tensor ) def A ( UpperCamelCase_ : Union[str, Any] ) -> List[str]: '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[int] ) -> str: '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCamelCase_ , "is_symbolic_tensor" ): return 
tf.is_symbolic_tensor(UpperCamelCase_ ) return type(UpperCamelCase_ ) == tf.Tensor def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(UpperCamelCase_ , jnp.ndarray ) def A ( UpperCamelCase_ : Union[str, Any] ) -> str: '''simple docstring''' return False if not is_flax_available() else _is_jax(UpperCamelCase_ ) def A ( UpperCamelCase_ : List[str] ) -> Tuple: '''simple docstring''' if isinstance(UpperCamelCase_ , (dict, UserDict) ): return {k: to_py_obj(UpperCamelCase_ ) for k, v in obj.items()} elif isinstance(UpperCamelCase_ , (list, tuple) ): return [to_py_obj(UpperCamelCase_ ) for o in obj] elif is_tf_tensor(UpperCamelCase_ ): return obj.numpy().tolist() elif is_torch_tensor(UpperCamelCase_ ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCamelCase_ ): return np.asarray(UpperCamelCase_ ).tolist() elif isinstance(UpperCamelCase_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( UpperCamelCase_ : Tuple ) -> Optional[Any]: '''simple docstring''' if isinstance(UpperCamelCase_ , (dict, UserDict) ): return {k: to_numpy(UpperCamelCase_ ) for k, v in obj.items()} elif isinstance(UpperCamelCase_ , (list, tuple) ): return np.array(UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): return obj.numpy() elif is_torch_tensor(UpperCamelCase_ ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCamelCase_ ): return np.asarray(UpperCamelCase_ ) else: return obj class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = fields(self ) # Safety and consistency checks if not len(__magic_name__ ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) lowerCAmelCase__ = getattr(self , class_fields[0].name ) lowerCAmelCase__ = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__magic_name__ ): if isinstance(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = first_field.items() lowerCAmelCase__ = True else: try: lowerCAmelCase__ = iter(__magic_name__ ) lowerCAmelCase__ = True except TypeError: lowerCAmelCase__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__magic_name__ ): if ( not isinstance(__magic_name__ , (list, tuple) ) or not len(__magic_name__ ) == 2 or not isinstance(element[0] , __magic_name__ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowerCAmelCase__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: lowerCAmelCase__ = element[1] elif first_field is not None: lowerCAmelCase__ = first_field else: for field in class_fields: lowerCAmelCase__ = getattr(self , field.name ) if v is not None: lowerCAmelCase__ = v def __delitem__( self : Union[str, Any] , *__magic_name__ : Tuple , **__magic_name__ : Dict ): """simple docstring""" raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : Tuple , **__magic_name__ : Dict ): """simple docstring""" raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : int , *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ): """simple docstring""" raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__magic_name__ : List[Any] , **__magic_name__ : List[Any] ): """simple docstring""" raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : Union[str, Any] , __magic_name__ : Optional[int] ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__magic_name__ , __magic_name__ ) super().__setattr__(__magic_name__ , __magic_name__ ) def __setitem__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] ): """simple docstring""" super().__setitem__(__magic_name__ , __magic_name__ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): @classmethod def __SCREAMING_SNAKE_CASE ( cls : int , __magic_name__ : Optional[int] ): """simple docstring""" raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :int = 'longest' snake_case__ :Optional[Any] = 'max_length' snake_case__ :str = 'do_not_pad' class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'pt' snake_case__ :Union[str, Any] = 'tf' snake_case__ :Any = 'np' snake_case__ :List[str] = 'jax' class A : def __init__( self : Dict , __magic_name__ : List[ContextManager] ): """simple docstring""" lowerCAmelCase__ = context_managers lowerCAmelCase__ = ExitStack() def __enter__( self : str ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(__magic_name__ ) def __exit__( self : Dict , *__magic_name__ : Dict , **__magic_name__ : int ): """simple docstring""" self.stack.__exit__(*__magic_name__ , **__magic_name__ ) def A ( UpperCamelCase_ : Dict ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = infer_framework(UpperCamelCase_ ) if framework == "tf": lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch 
models else: lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( UpperCamelCase_ : int ) -> int: '''simple docstring''' lowerCAmelCase__ = model_class.__name__ lowerCAmelCase__ = infer_framework(UpperCamelCase_ ) if framework == "tf": lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models else: lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( UpperCamelCase_ : MutableMapping , UpperCamelCase_ : str = "" , UpperCamelCase_ : str = "." ) -> List[Any]: '''simple docstring''' def _flatten_dict(UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]="" , UpperCamelCase_ : Any="." ): for k, v in d.items(): lowerCAmelCase__ = str(UpperCamelCase_ ) + delimiter + str(UpperCamelCase_ ) if parent_key else k if v and isinstance(UpperCamelCase_ , UpperCamelCase_ ): yield from flatten_dict(UpperCamelCase_ , UpperCamelCase_ , delimiter=UpperCamelCase_ ).items() else: yield key, v return dict(_flatten_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ) @contextmanager def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : bool = False ) -> Any: '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=None ) -> Tuple: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.transpose(UpperCamelCase_ , axes=UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.T if axes is None else array.permute(*UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.transpose(UpperCamelCase_ , perm=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.transpose(UpperCamelCase_ , axes=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for transpose: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> Dict: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.reshape(UpperCamelCase_ , UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.reshape(*UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.reshape(UpperCamelCase_ , UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.reshape(UpperCamelCase_ , UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for reshape: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=None ) -> Optional[int]: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for squeeze: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : 
Optional[int] , UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.expand_dims(UpperCamelCase_ , UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.unsqueeze(dim=UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.size(UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.numel() elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.size(UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' for key, value in auto_map.items(): if isinstance(UpperCamelCase_ , (tuple, list) ): lowerCAmelCase__ = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: lowerCAmelCase__ = F"""{repo_id}--{value}""" return auto_map def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' for base_class in inspect.getmro(UpperCamelCase_ ): lowerCAmelCase__ = base_class.__module__ lowerCAmelCase__ = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
48
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
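A short smoke test for the series helpers above (function names as restored; not part of the original file):

print(is_arithmetic_series([2, 4, 6]))  # True: constant difference of 2
print(is_arithmetic_series([2, 4, 7]))  # False: the difference changes
print(arithmetic_mean([2, 4, 6]))       # 4.0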
"""simple docstring""" def lowercase__ ( snake_case_ :int = 100 ): __UpperCAmelCase = n * (n + 1) * (2 * n + 1) / 6 __UpperCAmelCase = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f"""{solution() = }""")
49
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
2
0
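The closed-form identity used by solution() above can be cross-checked against a brute-force computation; a minimal sketch (the helper name is chosen here for illustration):

def sum_square_difference_brute_force(n: int = 100) -> int:
    # Direct evaluation of (1 + ... + n)^2 - (1^2 + ... + n^2).
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares


assert sum_square_difference_brute_force(10) == 2640  # worked example: 55**2 - 385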
"""Lazy import structure for the X-CLIP model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
0
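The lazy `__init__` above means the heavy modeling modules are only imported when one of their names is first accessed; user code imports them as usual. A sketch (the checkpoint name is an assumption for illustration, and downloading it requires network access):

from transformers import XCLIPProcessor  # resolved on first access via _LazyModule

# Checkpoint name assumed; any X-CLIP checkpoint would work the same way.
processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
print(type(processor).__name__)  # XCLIPProcessor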
"""Numerical integration of f(x) over an interval with the extended trapezoidal rule."""


def method_1(boundary: list, steps: float) -> float:
    """Approximate the integral of f over `boundary` using `steps` subintervals."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points between a and b with spacing h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
51
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
2
0
"""simple docstring""" from collections import defaultdict def __A ( a_ :int) -> int: __a : Dict = 1 __a : Any = True for v in tree[start]: if v not in visited: ret += dfs(a_) if ret % 2 == 0: cuts.append(a_) return ret def __A ( ) -> List[Any]: dfs(1) if __name__ == "__main__": A , A = 10, 9 A = defaultdict(list) A = {} A = [] A = 0 A = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
52
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
0
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
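# Round-trip sketch for the keyword cipher above (values verified by hand
# against the mapping the loop builds for the deduplicated key "PASWORD"):
cipher_map = create_cipher_map("Password")
assert encipher("Hello World!!", cipher_map) == "BOGGJ VJMGW!!"
assert decipher("BOGGJ VJMGW!!", cipher_map) == "HELLO WORLD!!"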
53
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
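# Usage sketch (assumes PretrainedConfig resolves attribute_map as it does
# in transformers):
config = OpenAIGPTConfig(n_layer=6)
assert config.num_hidden_layers == 6  # aliased via attribute_map
assert config.hidden_size == config.n_embd == 768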
2
0
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to ``precision`` significant digits with the Chudnovsky
    algorithm (each series term contributes roughly 14 digits)."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
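# Sanity-check sketch for pi() above; the comparison digits are the
# well-known opening of pi, so only the prefix is asserted (the final
# returned digit depends on Decimal rounding at the requested precision):
assert pi(30).startswith("3.1415926535897932")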
54
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
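# Minimal usage sketch for the processor under test (assumes torch, Pillow,
# and the local COCO fixture image used throughout these tests):
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = DeformableDetrImageProcessor()(images=image, return_tensors="pt")
# The slow test above checks that the default resize yields (1, 3, 800, 1066).
print(encoding["pixel_values"].shape)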
2
0
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = (CMStochasticIterativeScheduler,) snake_case_ = 10 def UpperCamelCase_ ( self : str ,**A : List[str] ): __A = { "num_train_timesteps": 2_01, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**A ) return config def UpperCamelCase_ ( self : int ): __A = 10 __A = self.get_scheduler_config() __A = self.scheduler_classes[0](**A ) scheduler.set_timesteps(A ) __A = scheduler.timesteps[0] __A = scheduler.timesteps[1] __A = self.dummy_sample __A = 0.1 * sample __A = scheduler.step(A ,A ,A ).prev_sample __A = scheduler.step(A ,A ,A ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def UpperCamelCase_ ( self : Dict ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=A ) def UpperCamelCase_ ( self : Dict ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=A ) def UpperCamelCase_ ( self : str ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) __A = 1 scheduler.set_timesteps(A ) __A = scheduler.timesteps __A = torch.manual_seed(0 ) __A = self.dummy_model() __A = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(A ): # 1. scale model input __A = scheduler.scale_model_input(A ,A ) # 2. predict noise residual __A = model(A ,A ) # 3. predict previous sample x_t-1 __A = scheduler.step(A ,A ,A ,generator=A ).prev_sample __A = pred_prev_sample __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def UpperCamelCase_ ( self : Union[str, Any] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) __A = [1_06, 0] scheduler.set_timesteps(timesteps=A ) __A = scheduler.timesteps __A = torch.manual_seed(0 ) __A = self.dummy_model() __A = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __A = scheduler.scale_model_input(A ,A ) # 2. predict noise residual __A = model(A ,A ) # 3. predict previous sample x_t-1 __A = scheduler.step(A ,A ,A ,generator=A ).prev_sample __A = pred_prev_sample __A = torch.sum(torch.abs(A ) ) __A = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def UpperCamelCase_ ( self : Dict ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) __A = [39, 30, 12, 15, 0] with self.assertRaises(A ,msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=A ) def UpperCamelCase_ ( self : Optional[Any] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) __A = [39, 30, 12, 1, 0] __A = len(A ) with self.assertRaises(A ,msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=A ,timesteps=A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.scheduler_classes[0] __A = self.get_scheduler_config() __A = scheduler_class(**A ) __A = [scheduler.config.num_train_timesteps] with self.assertRaises( A ,msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" ,): scheduler.set_timesteps(timesteps=A )
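# Condensed sampling-loop sketch using only the scheduler calls exercised by
# the tests above; the network is replaced by a zeros stand-in, so this
# demonstrates the API rather than producing a meaningful sample:
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    residual = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample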
55
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[mid], sequence[low] = sequence[low], sequence[mid]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
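# Usage sketch for the three-way partition above:
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []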
2
0
'''Trial-division primality test (6k +/- 1 stepping) with unit tests.'''
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
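# Why stepping by 6 works (a sketch): every integer is 6k + r with r in 0..5;
# 6k, 6k+2, 6k+4 are even and 6k+3 is divisible by 3, so after handling 2 and
# 3 only candidates of the form 6k+1 and 6k+5 (= 6(k+1) - 1) remain.
for p in [5, 7, 11, 13, 17, 19, 23, 29]:
    assert p % 6 in (1, 5)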
56
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
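# Usage sketch; 104743 is the widely cited answer to Project Euler problem 7
# (the 10001st prime):
assert solution(6) == 13  # the sixth prime
assert solution() == 104_743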
2
0
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case (UpperCAmelCase__ ) -> int: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class _lowerCAmelCase: """simple docstring""" def _a ( self , _lowerCamelCase , _lowerCamelCase ): pass def _a ( self ): pass def _a ( self ): pass def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: str = np.abs((a - b) ).max() self.assertLessEqual(_lowerCamelCase , _lowerCamelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): UpperCamelCase_: str = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: Any = FlaxVisionTextDualEncoderModel(_lowerCamelCase ) UpperCamelCase_: Optional[int] = model(input_ids=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.get_vision_text_model(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: List[str] = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase_: List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase ) UpperCamelCase_: Union[str, Any] = model(input_ids=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.get_vision_text_model(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: Optional[int] = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase_: Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase ) UpperCamelCase_: Optional[Any] = model(input_ids=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase ) 
UpperCamelCase_: Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCamelCase ) UpperCamelCase_: Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ) UpperCamelCase_: Any = model(input_ids=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase ) UpperCamelCase_: int = after_output[0] UpperCamelCase_: Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCamelCase , 1e-3 ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.get_vision_text_model(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: List[Any] = {'vision_model': vision_model, 'text_model': text_model} UpperCamelCase_: Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCamelCase ) UpperCamelCase_: List[str] = model( input_ids=_lowerCamelCase , pixel_values=_lowerCamelCase , attention_mask=_lowerCamelCase , output_attentions=_lowerCamelCase ) UpperCamelCase_: List[str] = output.vision_model_output.attentions self.assertEqual(len(_lowerCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase_: Any = to_atuple(vision_model.config.image_size ) UpperCamelCase_: Union[str, Any] = to_atuple(vision_model.config.patch_size ) UpperCamelCase_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCamelCase_: Optional[int] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCamelCase_: str = output.text_model_output.attentions self.assertEqual(len(_lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): pt_model.to(_lowerCamelCase ) pt_model.eval() # prepare inputs UpperCamelCase_: Optional[Any] = inputs_dict UpperCamelCase_: Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): UpperCamelCase_: Union[str, Any] = pt_model(**_lowerCamelCase ).to_tuple() UpperCamelCase_: List[Any] = fx_model(**_lowerCamelCase ).to_tuple() self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(_lowerCamelCase , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_lowerCamelCase ) UpperCamelCase_: int = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase ) UpperCamelCase_: List[Any] = fx_model_loaded(**_lowerCamelCase ).to_tuple() self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(_lowerCamelCase , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_lowerCamelCase ) UpperCamelCase_: Any = VisionTextDualEncoderModel.from_pretrained(_lowerCamelCase , from_flax=_lowerCamelCase ) pt_model_loaded.to(_lowerCamelCase ) pt_model_loaded.eval() with torch.no_grad(): UpperCamelCase_: int 
= pt_model_loaded(**_lowerCamelCase ).to_tuple() self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(_lowerCamelCase , pt_output_loaded.numpy() , 4e-2 ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: int = VisionTextDualEncoderModel(_lowerCamelCase ) UpperCamelCase_: str = FlaxVisionTextDualEncoderModel(_lowerCamelCase ) UpperCamelCase_: List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowerCamelCase ) UpperCamelCase_: List[str] = fx_state self.check_pt_flax_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: str = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: Tuple = VisionTextDualEncoderModel(_lowerCamelCase ) UpperCamelCase_: str = FlaxVisionTextDualEncoderModel(_lowerCamelCase ) UpperCamelCase_: List[Any] = load_flax_weights_in_pytorch_model(_lowerCamelCase , fx_model.params ) self.check_pt_flax_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( self ): UpperCamelCase_: str = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_lowerCamelCase ) def _a ( self ): UpperCamelCase_: int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCamelCase ) def _a ( self ): UpperCamelCase_: Optional[Any] = self.prepare_config_and_inputs() self.check_save_load(**_lowerCamelCase ) def _a ( self ): UpperCamelCase_: int = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCamelCase ) @is_pt_flax_cross_test def _a ( self ): UpperCamelCase_: Dict = self.prepare_config_and_inputs() UpperCamelCase_: str = config_inputs_dict.pop('vision_config' ) UpperCamelCase_: Dict = config_inputs_dict.pop('text_config' ) UpperCamelCase_: int = config_inputs_dict self.check_equivalence_pt_to_flax(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.check_equivalence_flax_to_pt(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @slow def _a ( self ): UpperCamelCase_ ,UpperCamelCase_: int = self.get_pretrained_model_and_inputs() UpperCamelCase_: Optional[Any] = model_a(**_lowerCamelCase ) UpperCamelCase_: Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCamelCase ) UpperCamelCase_: str = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCamelCase ) UpperCamelCase_: str = model_a(**_lowerCamelCase ) UpperCamelCase_: str = after_outputs[0] UpperCamelCase_: Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCamelCase , 1e-5 ) @require_flax class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCamelCase , text_from_pt=_lowerCamelCase , ) UpperCamelCase_: List[Any] = 1_3 UpperCamelCase_: Tuple = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCamelCase_: Union[str, Any] = 
ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCamelCase_: str = random_attention_mask([batch_size, 4] ) UpperCamelCase_: Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Any = FlaxViTModel(_lowerCamelCase ) UpperCamelCase_: Dict = FlaxBertModel(_lowerCamelCase ) return vision_model, text_model def _a ( self ): UpperCamelCase_: Union[str, Any] = FlaxViTModelTester(self ) UpperCamelCase_: Dict = FlaxBertModelTester(self ) UpperCamelCase_: Tuple = vit_model_tester.prepare_config_and_inputs() UpperCamelCase_: Optional[int] = bert_model_tester.prepare_config_and_inputs() UpperCamelCase_ ,UpperCamelCase_: Tuple = vision_config_and_inputs UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_lowerCamelCase , text_from_pt=_lowerCamelCase , ) UpperCamelCase_: Union[str, Any] = 1_3 UpperCamelCase_: Optional[int] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCamelCase_: Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCamelCase_: Any = random_attention_mask([batch_size, 4] ) UpperCamelCase_: List[str] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: str = FlaxCLIPVisionModel(_lowerCamelCase ) UpperCamelCase_: int = FlaxBertModel(_lowerCamelCase ) return vision_model, text_model def _a ( self ): UpperCamelCase_: int = FlaxCLIPVisionModelTester(self ) UpperCamelCase_: Tuple = FlaxBertModelTester(self ) UpperCamelCase_: List[str] = clip_model_tester.prepare_config_and_inputs() UpperCamelCase_: int = bert_model_tester.prepare_config_and_inputs() UpperCamelCase_ ,UpperCamelCase_: Optional[int] = vision_config_and_inputs UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" @slow def _a ( self ): UpperCamelCase_: Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) UpperCamelCase_: Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) UpperCamelCase_: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase_: Any = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=_lowerCamelCase , padding=_lowerCamelCase , return_tensors='np' ) 
UpperCamelCase_: Union[str, Any] = model(**_lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCamelCase_: str = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image , _lowerCamelCase , atol=1e-3 ) )
57
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
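# Quick illustration of find_backend() above: a line guarding an optional
# backend maps to that backend's name, anything else maps to None.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("import os") is None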
2
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : List[str] = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ['''MobileViTFeatureExtractor'''] __lowerCAmelCase : Optional[int] = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
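# Usage sketch (an assumption, not in the original file): this pipeline is
# normally constructed through `transformers.pipeline` with the
# "image-to-text" task; the checkpoint named below is illustrative only.
#
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("https://example.com/photo.jpg", max_new_tokens=20))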
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 256 class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["melgan"] def __init__(self : Tuple , UpperCAmelCase_ : SpectrogramNotesEncoder , UpperCAmelCase_ : SpectrogramContEncoder , UpperCAmelCase_ : TaFilmDecoder , UpperCAmelCase_ : DDPMScheduler , UpperCAmelCase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) ->None: '''simple docstring''' super().__init__() # From MELGAN lowerCamelCase__: List[Any] =math.log(1E-5) # Matches MelGAN training. lowerCamelCase__: str =4.0 # Largest value for most examples lowerCamelCase__: Dict =128 self.register_modules( notes_encoder=UpperCAmelCase_ , continuous_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , melgan=UpperCAmelCase_ , ) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=(-1.0, 1.0) , UpperCAmelCase_ : Dict=False) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: str =output_range if clip: lowerCamelCase__: Any =torch.clip(UpperCAmelCase_ , self.min_value , self.max_value) # Scale to [0, 1]. lowerCamelCase__: Tuple =(features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=(-1.0, 1.0) , UpperCAmelCase_ : List[str]=False) ->Dict: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: List[Any] =input_range lowerCamelCase__: List[str] =torch.clip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) if clip else outputs # Scale to [0, 1]. lowerCamelCase__: List[str] =(outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]) ->int: '''simple docstring''' lowerCamelCase__: Dict =input_tokens > 0 lowerCamelCase__ , lowerCamelCase__: int =self.notes_encoder( encoder_input_tokens=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__: Dict =self.continuous_encoder( encoder_inputs=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple) ->int: '''simple docstring''' lowerCamelCase__: Tuple =noise_time if not torch.is_tensor(UpperCAmelCase_): lowerCamelCase__: Tuple =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device) elif torch.is_tensor(UpperCAmelCase_) and len(timesteps.shape) == 0: lowerCamelCase__: Union[str, Any] =timesteps[None].to(input_tokens.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCamelCase__: List[Any] =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device) lowerCamelCase__: Tuple =self.decoder( encodings_and_masks=UpperCAmelCase_ , decoder_input_tokens=UpperCAmelCase_ , decoder_noise_time=UpperCAmelCase_) return logits @torch.no_grad() def __call__(self : Union[str, Any] , UpperCAmelCase_ : List[List[int]] , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "numpy" , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , ) ->Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(UpperCAmelCase_)}.""") lowerCamelCase__: List[str] =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa) lowerCamelCase__: Union[str, Any] =np.zeros([1, 0, self.n_dims] , np.floataa) lowerCamelCase__: Optional[int] =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device) for i, encoder_input_tokens in enumerate(UpperCAmelCase_): if i == 0: lowerCamelCase__: Optional[Any] =torch.from_numpy(pred_mel[:1].copy()).to( device=self.device , dtype=self.decoder.dtype) # The first chunk has no previous context. lowerCamelCase__: int =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowerCamelCase__: Tuple =ones lowerCamelCase__: Optional[Any] =self.scale_features( UpperCAmelCase_ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase_) lowerCamelCase__: Tuple =self.encode( input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=UpperCAmelCase_ , continuous_mask=UpperCAmelCase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowerCamelCase__: List[Any] =randn_tensor( shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(UpperCAmelCase_) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)): lowerCamelCase__: Optional[int] =self.decode( encodings_and_masks=UpperCAmelCase_ , input_tokens=UpperCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowerCamelCase__: str =self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: Optional[Any] =self.scale_to_features(UpperCAmelCase_ , input_range=[-1.0, 1.0]) lowerCamelCase__: List[Any] =mel[:1] lowerCamelCase__: Optional[int] =mel.cpu().float().numpy() lowerCamelCase__: int =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCAmelCase_ , UpperCAmelCase_) logger.info("Generated segment" , UpperCAmelCase_) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.") elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.") if output_type == "numpy": lowerCamelCase__: Any =self.melgan(input_features=full_pred_mel.astype(np.floataa)) else: lowerCamelCase__: Union[str, Any] =full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=UpperCAmelCase_)
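# Usage sketch (an assumption, not part of the original file): the pipeline
# above is typically loaded from a pretrained checkpoint and fed MIDI-derived
# note token sequences; the checkpoint id and `input_tokens` placeholder below
# are illustrative only.
#
# from diffusers import SpectrogramDiffusionPipeline
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# output = pipe(input_tokens, num_inference_steps=100)
# audio = output.audios[0]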
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def lowerCamelCase_ ( _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : List[str] = torch.exp(_UpperCamelCase ) snake_case_ : List[str] = torch.sum(_UpperCamelCase , dim=1 ) # sum of exp(x_i) snake_case_ : Union[str, Any] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(_UpperCamelCase ) - B / A class __lowerCAmelCase ( nn.Module ): def __init__(self , __magic_name__ ) -> Tuple: '''simple docstring''' super().__init__() snake_case_ : Any = config.output_attentions snake_case_ : Any = config.output_hidden_states snake_case_ : Union[str, Any] = nn.ModuleList([BertLayer(__magic_name__ ) for _ in range(config.num_hidden_layers )] ) snake_case_ : Tuple = nn.ModuleList([BertHighway(__magic_name__ ) for _ in range(config.num_hidden_layers )] ) snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )] def lowerCamelCase (self , __magic_name__ ) -> Optional[int]: '''simple docstring''' if (type(__magic_name__ ) is float) or (type(__magic_name__ ) is int): for i in range(len(self.early_exit_entropy ) ): snake_case_ : Optional[Any] = x else: snake_case_ : str = x def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Tuple = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def lowerCamelCase (self , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = () snake_case_ : str = () snake_case_ : Any = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: snake_case_ : Optional[Any] = all_hidden_states + (hidden_states,) snake_case_ : Union[str, Any] = layer_module( __magic_name__ , __magic_name__ , head_mask[i] , __magic_name__ , __magic_name__ ) snake_case_ : List[str] = layer_outputs[0] if self.output_attentions: snake_case_ : List[str] = all_attentions + (layer_outputs[1],) snake_case_ : int = (hidden_states,) if self.output_hidden_states: snake_case_ : str = current_outputs + (all_hidden_states,) if self.output_attentions: snake_case_ : Optional[int] = current_outputs + (all_attentions,) snake_case_ : Dict = self.highway[i](__magic_name__ ) # logits, pooled_output if not self.training: snake_case_ : List[str] = highway_exit[0] snake_case_ : str = entropy(__magic_name__ ) snake_case_ : Any = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy snake_case_ : Optional[Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: snake_case_ : Optional[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(__magic_name__ , i + 1 ) else: snake_case_ : List[str] = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: snake_case_ : Dict = all_hidden_states + (hidden_states,) snake_case_ : Union[str, Any] = (hidden_states,) if self.output_hidden_states: snake_case_ : int = outputs + (all_hidden_states,) if self.output_attentions: snake_case_ : Tuple = outputs + (all_attentions,) snake_case_ : Dict = outputs + (all_highway_exits,) return outputs # last-layer 
hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( '''The Bert Model transformer with early exiting (DeeBERT). ''', _a, ) class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__ ) -> int: '''simple docstring''' super().__init__(__magic_name__ ) snake_case_ : Any = config snake_case_ : int = BertEmbeddings(__magic_name__ ) snake_case_ : str = DeeBertEncoder(__magic_name__ ) snake_case_ : Any = BertPooler(__magic_name__ ) self.init_weights() def lowerCamelCase (self ) -> str: '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' return self.embeddings.word_embeddings def lowerCamelCase (self , __magic_name__ ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = value def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(__magic_name__ ) @add_start_docstrings_to_model_forward(__magic_name__ ) def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> List[Any]: '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: snake_case_ : Union[str, Any] = input_ids.size() elif inputs_embeds is not None: snake_case_ : int = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) snake_case_ : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: snake_case_ : Optional[Any] = torch.ones(__magic_name__ , device=__magic_name__ ) if encoder_attention_mask is None: snake_case_ : Optional[int] = torch.ones(__magic_name__ , device=__magic_name__ ) if token_type_ids is None: snake_case_ : int = torch.zeros(__magic_name__ , dtype=torch.long , device=__magic_name__ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
snake_case_ : torch.Tensor = self.get_extended_attention_mask(__magic_name__ , __magic_name__ , __magic_name__ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: snake_case_ : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: snake_case_ : List[str] = encoder_attention_mask[:, None, None, :] snake_case_ : Optional[Any] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility snake_case_ : Any = (1.0 - encoder_extended_attention_mask) * -10_000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] snake_case_ : Any = self.get_head_mask(__magic_name__ , self.config.num_hidden_layers ) snake_case_ : Tuple = self.embeddings( input_ids=__magic_name__ , position_ids=__magic_name__ , token_type_ids=__magic_name__ , inputs_embeds=__magic_name__ ) snake_case_ : List[Any] = self.encoder( __magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , ) snake_case_ : List[str] = encoder_outputs[0] snake_case_ : List[Any] = self.pooler(__magic_name__ ) snake_case_ : Any = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__ , __magic_name__ ) -> Tuple: '''simple docstring''' snake_case_ : Dict = message snake_case_ : Dict = exit_layer # start from 1! class __lowerCAmelCase ( nn.Module ): def __init__(self , __magic_name__ ) -> List[str]: '''simple docstring''' super().__init__() snake_case_ : Optional[int] = BertPooler(__magic_name__ ) snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) snake_case_ : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels ) def lowerCamelCase (self , __magic_name__ ) -> Optional[int]: '''simple docstring''' snake_case_ : str = encoder_outputs[0] snake_case_ : Dict = self.pooler(__magic_name__ ) # "return" pooler_output # BertModel snake_case_ : Any = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification snake_case_ : Dict = bmodel_output[1] snake_case_ : List[Any] = self.dropout(__magic_name__ ) snake_case_ : Any = self.classifier(__magic_name__ ) return logits, pooled_output @add_start_docstrings( '''Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
''', _a, ) class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__ ) -> int: '''simple docstring''' super().__init__(__magic_name__ ) snake_case_ : Dict = config.num_labels snake_case_ : Union[str, Any] = config.num_hidden_layers snake_case_ : str = DeeBertModel(__magic_name__ ) snake_case_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob ) snake_case_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(__magic_name__ ) def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[Any] = self.num_layers try: snake_case_ : int = self.bert( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits snake_case_ : List[str] = outputs[1] snake_case_ : List[str] = self.dropout(__magic_name__ ) snake_case_ : Tuple = self.classifier(__magic_name__ ) snake_case_ : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: snake_case_ : Optional[int] = e.message snake_case_ : Dict = e.exit_layer snake_case_ : str = outputs[0] if not self.training: snake_case_ : Optional[int] = entropy(__magic_name__ ) snake_case_ : Optional[int] = [] snake_case_ : List[str] = [] if labels is not None: if self.num_labels == 1: # We are doing regression snake_case_ : str = MSELoss() snake_case_ : Optional[int] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: snake_case_ : List[Any] = CrossEntropyLoss() snake_case_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits snake_case_ : str = [] for highway_exit in outputs[-1]: snake_case_ : List[Any] = highway_exit[0] if not self.training: highway_logits_all.append(__magic_name__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression snake_case_ : Optional[Any] = MSELoss() snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: snake_case_ : Tuple = CrossEntropyLoss() snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__magic_name__ ) if train_highway: snake_case_ : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: snake_case_ : int = (loss,) + outputs if not self.training: snake_case_ : Dict = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: snake_case_ : List[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
def longest_distance(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
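# Worked example (added for clarity, not in the original file): for a perfect
# number the proper-divisor sum equals the number itself, e.g. 28 has proper
# divisors 1, 2, 4, 7, 14 and 1 + 2 + 4 + 7 + 14 == 28.
#
# assert sum_of_divisors(28) == 28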
def print_max_activities(start: list, finish: list) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
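# Worked trace (added for clarity, not in the original file): with
# start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9], the greedy scan
# keeps activity 0 (finish 2), then 1 (start 3 >= 2), then 3 (start 5 >= 4),
# then 4 (start 8 >= 7), printing "0,1,3,4,". The inputs must already be
# sorted by finish time for the greedy choice to be valid.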
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
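# Quick check (added for clarity, not in the original file): the rolling hash
# drops the leading character and appends the next one in O(1), so the whole
# scan is linear in len(text).
#
# assert rabin_karp("abc", "xxabcxx")
# assert not rabin_karp("abd", "xxabcxx")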
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
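# Example (added for clarity, not in the original file): the gnome walks back
# one step after every swap, so out-of-order prefixes are repaired in place.
#
# assert gnome_sort([5, 3, 1]) == [1, 3, 5]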
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
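# Usage sketch (an assumption, not part of the original file): the tokenizer
# is normally loaded from the checkpoint referenced in the vocab maps above;
# the token ids in the decode call are illustrative only.
#
# from transformers import Speech2Text2Tokenizer
# tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
# print(tokenizer.decode([31, 208, 9]))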
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0) -> None:
        # private key used as a fallback by all methods
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
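# Usage sketch (an assumption, not part of the original file): build an
# undirected weighted graph and run the module-level Prim routine defined
# above. Method and function names follow the upstream version this file
# appears to be derived from; the renaming pass in this file has collapsed
# several of them, so treat this strictly as intended-usage pseudocode.
#
# graph = GraphUndirectedWeighted[int]()
# graph.add_edge(1, 2, 3)
# graph.add_edge(2, 3, 10)
# graph.add_edge(1, 3, 11)
# dist, parent = prims_algo(graph)
# print(parent)  # parent map encoding the minimum spanning tree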
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
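# Usage sketch (an assumption, not part of the original file): the tokenizer
# is normally loaded from the checkpoint referenced in the vocab maps above;
# the sample sentence is illustrative only.
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])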
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets UpperCamelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" UpperCamelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" UpperCamelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: return float((preds == labels).mean() ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Optional[int] = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : int = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE ) ) return { "accuracy": acc, "f1": fa, } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Optional[Any] = float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] ) _lowercase : int = float(spearmanr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def __a ( self ): if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name 
selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )} elif self.config_name == "stsb": return pearson_and_spearman(_lowerCAmelCase , _lowerCAmelCase ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
66
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
2
0
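As a quick illustration of the iterative traversal above — a minimal sketch, assuming the corrected `depth_first_search` name — a recursive variant should visit the same reachable set:

def depth_first_search_recursive(graph: dict, v: str, explored: set = None) -> set:
    # Same traversal expressed recursively; the call stack replaces the explicit stack.
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored

# Every node of G is reachable from "A", so both traversals return the full vertex set.
assert depth_first_search_recursive(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}
assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")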
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters snake_case = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :Optional[int] , snake_case__ :Any , snake_case__ :Union[str, Any]=None , snake_case__ :str=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: _lowercase = tensor_name.split('.' ) for split in splits[:-1]: _lowercase = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) _lowercase = new_module _lowercase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) _lowercase = tensor_name in module._buffers _lowercase = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) _lowercase = False _lowercase = False if is_buffer or not is_bitsandbytes_available(): _lowercase = False _lowercase = False else: _lowercase = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) _lowercase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: _lowercase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _lowercase = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): _lowercase = value.to('cpu' ) if value.dtype == torch.inta: _lowercase = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: _lowercase = torch.tensor(snake_case__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: _lowercase = new_value.T _lowercase = old_value.__dict__ if is_abit: _lowercase = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: _lowercase = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) _lowercase = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: _lowercase = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): _lowercase = value.to(snake_case__ ) else: _lowercase = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: _lowercase = new_value else: _lowercase = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) _lowercase = new_value def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :Any=None , snake_case__ :Union[str, Any]=None , snake_case__ :Optional[Any]=None , snake_case__ :str=False ) -> Optional[int]: for name, module in model.named_children(): if current_key_name is None: _lowercase = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): _lowercase , _lowercase = module.weight.shape else: _lowercase = module.in_features _lowercase = module.out_features if quantization_config.quantization_method() == "llm_int8": _lowercase = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) _lowercase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _lowercase = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) _lowercase = True # Store the module class in case we need to transpose the weight later _lowercase = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: _lowercase , _lowercase = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE__ ( snake_case__ :int , snake_case__ :Union[str, Any]=None , snake_case__ :Union[str, Any]=None , snake_case__ :Union[str, Any]=None ) -> Optional[Any]: _lowercase = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert _lowercase , _lowercase = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' 
) return model def SCREAMING_SNAKE_CASE__ ( *snake_case__ :Optional[Any] , **snake_case__ :Optional[int] ) -> str: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case__ :Dict , **snake_case__ :List[str] ) -> Tuple: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Any: _lowercase = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _lowercase = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): _lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _lowercase = sum(snake_case__ , [] ) _lowercase = len(snake_case__ ) > 0 # Check if it is a base model _lowercase = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _lowercase = list(model.named_children() ) _lowercase = [list_modules[-1][0]] # add last module together with tied weights _lowercase = set(snake_case__ ) - set(snake_case__ ) _lowercase = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys _lowercase = ['.weight', '.bias'] _lowercase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _lowercase = name.replace(snake_case__ , '' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
67
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
2
0
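The recursive replacement loop in `_replace_with_bnb_linear` above follows a generic PyTorch pattern: walk `named_children()`, swap every `nn.Linear` not on a skip list, and recurse into submodules. A minimal sketch of that pattern with a stand-in layer (`FakeQuantLinear` and `replace_linear` are hypothetical names, not the bitsandbytes API):

import torch.nn as nn

class FakeQuantLinear(nn.Linear):
    # Stand-in for a quantized linear layer such as bnb.nn.Linear8bitLt.
    pass

def replace_linear(model: nn.Module, skip: tuple = ("lm_head",)) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new = FakeQuantLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
            setattr(model, name, new)  # replace the child in place
        elif len(list(module.children())) > 0:
            replace_linear(module, skip)  # recurse into nested containers
    return model

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
replace_linear(model)
assert isinstance(model[0], FakeQuantLinear)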
from collections.abc import Callable

import numpy as np


def heun(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with Heun's (improved Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Euler predictor step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Trapezoidal corrector step, averaging the slopes at both ends
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
0
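A quick accuracy check for the Heun integrator above (assuming the corrected `heun` name): integrating y' = y from y(0) = 1 should approach e at x = 1, and since Heun's method is second order the error shrinks like the square of the step size:

import numpy as np

y = heun(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1], np.e)  # y[-1] should agree with e to several decimal places
assert abs(y[-1] - np.e) < 1e-3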
'''simple docstring''' import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): a : Dict = True from torch.cuda.amp import autocast a : str = logging.getLogger(__name__) def __UpperCAmelCase ( _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Any=None ) -> Optional[int]: return field(default_factory=lambda: default , metadata=_UpperCAmelCase ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={ """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.""" } , ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , ) __SCREAMING_SNAKE_CASE = field( default=0.05 , metadata={ """help""": ( """Propability of each feature vector along the time axis to be chosen as the start of the vector""" """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature""" """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.""" ) } , ) __SCREAMING_SNAKE_CASE = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) __SCREAMING_SNAKE_CASE = field( default="""train+validation""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). 
Defaults to 'train'""" } , ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __SCREAMING_SNAKE_CASE = field( default=_UpperCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of validation examples to this """ """value if set.""" ) } , ) __SCREAMING_SNAKE_CASE = list_field( default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , ) @dataclass class SCREAMING_SNAKE_CASE__ : __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self : Tuple , a_ : List[Dict[str, Union[List[int], torch.Tensor]]] ): """simple docstring""" __snake_case = [{"input_values": feature["input_values"]} for feature in features] __snake_case = [{"input_ids": feature["labels"]} for feature in features] __snake_case = self.processor.pad( a_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) __snake_case = self.processor.pad( labels=a_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , ) # replace padding with -100 to ignore loss correctly __snake_case = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 ) __snake_case = labels return batch class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def A ( self : List[str] , a_ : nn.Module , a_ : Dict[str, Union[torch.Tensor, Any]] ): """simple docstring""" model.train() __snake_case = self._prepare_inputs(a_ ) if self.use_amp: with autocast(): __snake_case = self.compute_loss(a_ , a_ ) else: __snake_case = self.compute_loss(a_ , a_ ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": __snake_case = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __snake_case = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __snake_case = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(a_ ).backward() elif self.use_apex: with amp.scale_loss(a_ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(a_ ) else: loss.backward() return loss.detach() def __UpperCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , _UpperCAmelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: __snake_case = datasets.load_dataset( "common_voice" , data_args.dataset_config_name , split=data_args.train_split_name ) __snake_case = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" ) # Create and save tokenizer __snake_case = F'''[{"".join(data_args.chars_to_ignore )}]''' def remove_special_characters(_UpperCAmelCase : Dict ): __snake_case = re.sub(_UpperCAmelCase , "" , batch["sentence"] ).lower() + " " return batch __snake_case = train_dataset.map(_UpperCAmelCase , remove_columns=["sentence"] ) __snake_case = eval_dataset.map(_UpperCAmelCase , remove_columns=["sentence"] ) def extract_all_chars(_UpperCAmelCase : Tuple ): __snake_case = " ".join(batch["text"] ) __snake_case = list(set(_UpperCAmelCase ) ) return {"vocab": [vocab], "all_text": [all_text]} __snake_case = train_dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , batch_size=-1 , keep_in_memory=_UpperCAmelCase , remove_columns=train_dataset.column_names , ) __snake_case = train_dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , batch_size=-1 , keep_in_memory=_UpperCAmelCase , remove_columns=eval_dataset.column_names , ) __snake_case = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) ) __snake_case = {v: k for k, v in enumerate(_UpperCAmelCase )} __snake_case = vocab_dict[" "] del vocab_dict[" "] __snake_case = len(_UpperCAmelCase ) __snake_case = len(_UpperCAmelCase ) with open("vocab.json" , "w" ) as vocab_file: json.dump(_UpperCAmelCase , _UpperCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = WavaVecaCTCTokenizer( "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , ) __snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase ) __snake_case = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) __snake_case = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: __snake_case = min(len(_UpperCAmelCase ) , data_args.max_train_samples ) __snake_case = train_dataset.select(range(_UpperCAmelCase ) ) if data_args.max_val_samples is not None: __snake_case = eval_dataset.select(range(data_args.max_val_samples ) ) __snake_case = torchaudio.transforms.Resample(4_80_00 , 1_60_00 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(_UpperCAmelCase : Tuple ): __snake_case , __snake_case = torchaudio.load(batch["path"] ) __snake_case = resampler(_UpperCAmelCase ).squeeze().numpy() __snake_case = 1_60_00 __snake_case = batch["text"] return batch __snake_case = train_dataset.map( _UpperCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) __snake_case = eval_dataset.map( _UpperCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(_UpperCAmelCase : Dict ): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"] ) ) == 1 ), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.''' __snake_case = processor( audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] ) batch.update(_UpperCAmelCase ) return batch __snake_case = train_dataset.map( _UpperCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , ) __snake_case = eval_dataset.map( _UpperCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , ) # Metric __snake_case = datasets.load_metric("wer" ) def compute_metrics(_UpperCAmelCase : List[str] ): __snake_case = pred.predictions __snake_case = np.argmax(_UpperCAmelCase , axis=-1 ) __snake_case = processor.tokenizer.pad_token_id __snake_case = processor.batch_decode(_UpperCAmelCase ) # we do not want to group tokens when computing the metrics __snake_case = processor.batch_decode(pred.label_ids , group_tokens=_UpperCAmelCase ) __snake_case = wer_metric.compute(predictions=_UpperCAmelCase , references=_UpperCAmelCase ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator __snake_case = DataCollatorCTCWithPadding(processor=_UpperCAmelCase , padding=_UpperCAmelCase ) # 
Initialize our Trainer __snake_case = CTCTrainer( model=_UpperCAmelCase , data_collator=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: __snake_case = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): __snake_case = model_args.model_name_or_path else: __snake_case = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) __snake_case = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() __snake_case = train_result.metrics __snake_case = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase ) ) __snake_case = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics("train" , _UpperCAmelCase ) trainer.save_metrics("train" , _UpperCAmelCase ) trainer.save_state() # Evaluation __snake_case = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __snake_case = trainer.evaluate() __snake_case = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCAmelCase ) __snake_case = min(_UpperCAmelCase , len(_UpperCAmelCase ) ) trainer.log_metrics("eval" , _UpperCAmelCase ) trainer.save_metrics("eval" , _UpperCAmelCase ) return results if __name__ == "__main__": main()
69
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
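A round-trip check for the Base16 helpers above (assuming the corrected `base16_encode`/`base16_decode` names), cross-checked against the standard library's reference implementation:

import base64

payload = b"Hello World!"
encoded = base16_encode(payload)
# The stdlib's b16encode produces the same uppercase hex alphabet.
assert encoded == base64.b16encode(payload).decode("ascii")
assert base16_decode(encoded) == payload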
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Optional[int]=10 ): '''simple docstring''' lowerCamelCase_ = [] for _ in range(lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Any=10 ): '''simple docstring''' lowerCamelCase_ = [] for step in range(lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ = os.path.join(lowercase , 'schedule.bin' ) torch.save(scheduler.state_dict() , lowercase ) lowerCamelCase_ = torch.load(lowercase ) scheduler.load_state_dict(lowercase ) return lrs @require_torch class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : str , A_ : List[str] , A_ : str , A_ : Any ) -> Dict: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A_ ) lowerCamelCase_ = torch.tensor([0.4, 0.2, -0.5] ) lowerCamelCase_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCamelCase_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): lowerCamelCase_ = criterion(A_ , A_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def a__ ( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A_ ) lowerCamelCase_ = torch.tensor([0.4, 0.2, -0.5] ) lowerCamelCase_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCamelCase_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=A_ , weight_decay=0.0 , relative_step=A_ , scale_parameter=A_ , warmup_init=A_ , ) for _ in range(1000 ): lowerCamelCase_ = criterion(A_ , A_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class A( unittest.TestCase ): '''simple docstring''' UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None UpperCamelCase = 10 def a__ ( self : Optional[int] , A_ : List[str] , A_ : Tuple , A_ : Optional[int] , A_ : Dict=None ) -> List[str]: """simple docstring""" self.assertEqual(len(A_ ) , len(A_ ) ) for a, b in zip(A_ , A_ ): self.assertAlmostEqual(A_ , A_ , delta=A_ , msg=A_ ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCamelCase_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCamelCase_ , lowerCamelCase_ = data lowerCamelCase_ = scheduler_func(self.optimizer , **A_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCamelCase_ = unwrap_schedule(A_ , self.num_steps ) self.assertListAlmostEqual( A_ , A_ , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) lowerCamelCase_ = scheduler_func(self.optimizer , **A_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(A_ ) # wrap to test picklability of the schedule lowerCamelCase_ = unwrap_and_save_reload_schedule(A_ , self.num_steps ) self.assertListEqual(A_ , A_ , msg=f"""failed for {scheduler_func} in save and reload""" ) class A: '''simple docstring''' def __init__( self : Optional[Any] , A_ : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ = fn def __call__( self : List[str] , *A_ : List[Any] , **A_ : Tuple ) -> Any: """simple docstring""" return self.fn(*A_ , **A_ ) @classmethod def a__ ( self : Dict , A_ : Optional[Any] ) -> int: """simple docstring""" lowerCamelCase_ = list(map(self , scheduler.lr_lambdas ) )
70
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
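The expected-LR table for `get_linear_schedule_with_warmup` in the scheduler test above (base LR 10.0, 2 warmup steps, 10 training steps) can be reproduced with a small multiplier function; a sketch of that schedule, where `linear_lambda` is an illustrative name rather than the library's internal one:

def linear_lambda(step: int, num_warmup_steps: int = 2, num_training_steps: int = 10) -> float:
    # Linear ramp from 0 to 1 during warmup, then linear decay back to 0.
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(
        0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps)
    )

lrs = [round(10.0 * linear_lambda(s), 2) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]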
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
71
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
2
0
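The brute-force `solution` above can be cross-checked against the closed forms sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6; a small sketch:

def solution_closed_form(n: int = 100) -> int:
    # Square of the sum minus sum of the squares, in O(1).
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2640        # 3025 - 385
assert solution_closed_form(100) == 25164150   # matches the loop-based solution()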
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` as a newline-separated string."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
72
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
0
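The checkpoint-porting script above is driven by its `MAPPING` table: fairseq key prefixes are renamed before the tensors are copied into the Hugging Face module tree. A minimal sketch of that renaming idea, reusing two entries from the table (the `remap_keys` helper itself is hypothetical):

MAPPING = {
    "post_extract_proj": "feature_projection",
    "w2v_model.layer_norm": "layer_norm",
}

def remap_keys(state_dict: dict) -> dict:
    # Apply every substring rename from MAPPING to each checkpoint key.
    remapped = {}
    for key, value in state_dict.items():
        for old, new in MAPPING.items():
            if old in key:
                key = key.replace(old, new)
        remapped[key] = value
    return remapped

print(remap_keys({"post_extract_proj.weight": 0}))
# {'feature_projection.weight': 0}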
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self , a , a=13 , a=32 , a=3 , a=4 , a=[10, 20, 30, 40] , a=[2, 2, 3, 2] , a=True , a=True , a=37 , a="gelu" , a=10 , a=0.02 , a=["stage2", "stage3", "stage4"] , a=[2, 3, 4] , a=None , ) -> str: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_stages SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = out_features SCREAMING_SNAKE_CASE = out_indices SCREAMING_SNAKE_CASE = scope def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self) -> str: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int: SCREAMING_SNAKE_CASE = ConvNextModel(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any: SCREAMING_SNAKE_CASE = ConvNextForImageClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , labels=a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Dict: SCREAMING_SNAKE_CASE = ConvNextBackbone(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a) # verify hidden states self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None SCREAMING_SNAKE_CASE = None 
SCREAMING_SNAKE_CASE = ConvNextBackbone(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _snake_case ( A__ , A__ , unittest.TestCase ): _lowercase : List[str] = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _lowercase : Optional[Any] = ( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) _lowercase : str = True _lowercase : List[Any] = False _lowercase : Dict = False _lowercase : Optional[Any] = False _lowercase : int = False def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = ConvNextModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return @unittest.skip(reason='ConvNext does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ ( self) -> Dict: pass @unittest.skip(reason='ConvNext does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: pass @unittest.skip(reason='ConvNext does not use feedforward chunking') def SCREAMING_SNAKE_CASE__ ( self) -> Any: pass def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(a) SCREAMING_SNAKE_CASE = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ['pixel_values'] self.assertListEqual(arg_names[:1] , a) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: def check_hidden_states_output(a , a , a): SCREAMING_SNAKE_CASE = model_class(a) model.to(a) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a , a)) SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(a) , 
expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(a , a , a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(a , a , a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a) @slow def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = ConvNextModel.from_pretrained(a) self.assertIsNotNone(a) def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(a) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='pt').to(a) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**a) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , a) SCREAMING_SNAKE_CASE = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4)) @require_torch class _snake_case ( unittest.TestCase , A__ ): _lowercase : Tuple = (ConvNextBackbone,) if is_torch_available() else () _lowercase : Tuple = ConvNextConfig _lowercase : int = False def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = ConvNextModelTester(self)
73
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
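# --- Illustrative usage sketch --------------------------------------------------------
# The minimal call pattern the tests above exercise: a zero-shot image classifier scores
# an image against caller-supplied candidate labels. Task name, checkpoint, and labels
# are copied from the slow test; the fixture path is assumed to exist locally.
from PIL import Image
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Returns a list of {"score": float, "label": str} dicts, highest score first.
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))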
from abc import ABC, abstractmethod from typing import List, Optional class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : str = False while not completed: if counter == 1: self.reset() __SCREAMING_SNAKE_CASE : int = self.advance() if not self.does_advance(_A ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.update(_A ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Tuple , _A : int ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Any , _A : int ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Tuple , _A : Dict=False ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : int , _A : List[int] ): """simple docstring""" super(_A , self ).__init__() if not isinstance(_A , _A ) or len(_A ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __SCREAMING_SNAKE_CASE : Tuple = token_ids __SCREAMING_SNAKE_CASE : List[Any] = len(self.token_ids ) __SCREAMING_SNAKE_CASE : Optional[Any] = -1 # the index of the currently fulfilled step __SCREAMING_SNAKE_CASE : int = False def UpperCAmelCase__ ( self : Dict ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : Tuple , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_A )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : str , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_A ): self.fulfilled_idx += 1 __SCREAMING_SNAKE_CASE : int = True if self.fulfilled_idx == (self.seqlen - 1): __SCREAMING_SNAKE_CASE : Any = True __SCREAMING_SNAKE_CASE : Optional[Any] = completed else: # failed to make progress. __SCREAMING_SNAKE_CASE : Optional[int] = True self.reset() return stepped, completed, reset def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : List[Any] = 0 def UpperCAmelCase__ ( self : int ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any]=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = PhrasalConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.seqlen __SCREAMING_SNAKE_CASE : Optional[int] = self.fulfilled_idx __SCREAMING_SNAKE_CASE : Dict = self.completed return new_constraint class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : List[List[int]] , _A : Tuple=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = max([len(_A ) for one in nested_token_ids] ) __SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: __SCREAMING_SNAKE_CASE : List[str] = root for tidx, token_id in enumerate(_A ): if token_id not in level: __SCREAMING_SNAKE_CASE : Optional[Any] = {} __SCREAMING_SNAKE_CASE : Dict = level[token_id] if no_subsets and self.has_subsets(_A , _A ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F''' {nested_token_ids}.''' ) __SCREAMING_SNAKE_CASE : Dict = root def UpperCAmelCase__ ( self : Dict , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.trie for current_token in current_seq: __SCREAMING_SNAKE_CASE : str = start[current_token] __SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() ) return next_tokens def UpperCAmelCase__ ( self : Tuple , _A : Dict ): """simple docstring""" 
__SCREAMING_SNAKE_CASE : List[str] = self.next_tokens(_A ) return len(_A ) == 0 def UpperCAmelCase__ ( self : int , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = list(root.values() ) if len(_A ) == 0: return 1 else: return sum([self.count_leaves(_A ) for nn in next_nodes] ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.count_leaves(_A ) return len(_A ) != leaf_count class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , _A : List[List[int]] ): """simple docstring""" super(_A , self ).__init__() if not isinstance(_A , _A ) or len(_A ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_A , _A ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveTrie(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = nested_token_ids __SCREAMING_SNAKE_CASE : Any = self.trie.max_height __SCREAMING_SNAKE_CASE : List[Any] = [] __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) if len(_A ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : Union[str, Any] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase__ ( self : List[str] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_A ): self.current_seq.append(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = True else: __SCREAMING_SNAKE_CASE : Tuple = True self.reset() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.trie.reached_leaf(self.current_seq ) __SCREAMING_SNAKE_CASE : Optional[Any] = completed return stepped, completed, reset def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : Any = [] def UpperCAmelCase__ ( self : Any ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase__ ( self : Dict , _A : List[str]=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE : Dict = self.seqlen __SCREAMING_SNAKE_CASE : Tuple = self.current_seq __SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class __UpperCamelCase : """simple docstring""" def __init__( self : Dict , _A : List[Constraint] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = constraints # max # of steps required to fulfill a given 
constraint __SCREAMING_SNAKE_CASE : Dict = max([c.seqlen for c in constraints] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = len(_A ) __SCREAMING_SNAKE_CASE : List[str] = False self.init_state() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Union[str, Any] = [constraint.copy(stateful=_A ) for constraint in self.constraints] def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __SCREAMING_SNAKE_CASE : Any = constraint.advance() if isinstance(_A , _A ): token_list.append(_A ) elif isinstance(_A , _A ): token_list.extend(_A ) else: __SCREAMING_SNAKE_CASE : Any = self.inprogress_constraint.advance() if isinstance(_A , _A ): token_list.append(_A ) elif isinstance(_A , _A ): token_list.extend(_A ) if len(_A ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : int , _A : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.add(_A ) # the entire list of constraints are fulfilled if self.completed: break def UpperCAmelCase__ ( self : Optional[int] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = False, False if self.completed: __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : List[str] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.inprogress_constraint.update(_A ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_A ) ) __SCREAMING_SNAKE_CASE : int = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None if len(self.pending_constraints ) == 0: # we're done! __SCREAMING_SNAKE_CASE : List[str] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? 
for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_A ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint.update(_A ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_A ) __SCREAMING_SNAKE_CASE : Tuple = None if not complete and stepped: __SCREAMING_SNAKE_CASE : List[str] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __SCREAMING_SNAKE_CASE : Optional[int] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __SCREAMING_SNAKE_CASE : Any = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def UpperCAmelCase__ ( self : Union[str, Any] , _A : int=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __SCREAMING_SNAKE_CASE : List[str] = [ constraint.copy(stateful=_A ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __SCREAMING_SNAKE_CASE : Tuple = self.inprogress_constraint.copy(stateful=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints] return new_state
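# --- Illustrative usage sketch --------------------------------------------------------
# How the constraint machinery above is consumed in practice: transformers' constrained
# beam search accepts Constraint objects through generate(constraints=...). The GPT-2
# checkpoint and prompt are assumptions chosen for illustration; constrained search
# requires num_beams > 1.
from transformers import AutoModelForCausalLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Force the phrase " New York" (as GPT-2 token ids) to appear somewhere in the output.
phrase_ids = tokenizer(" New York", add_special_tokens=False).input_ids
constraint = PhrasalConstraint(phrase_ids)

inputs = tokenizer("The best city to visit is", return_tensors="pt")
output_ids = model.generate(**inputs, constraints=[constraint], num_beams=5, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))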
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
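# --- Illustrative usage sketch --------------------------------------------------------
# Outside the temp-dir fixtures above, the same processor is normally loaded from the
# published ALIGN checkpoint (name assumed here) and fuses tokenizer and image processor
# into a single call, exactly as the joint-input test checks.
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(list(inputs.keys()))  # ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]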
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : List[Any] , _A : Tuple=12 , _A : Tuple=7 , _A : str=True , _A : List[Any]=True , _A : Any=True , _A : Optional[Any]=99 , _A : Tuple=32 , _A : Dict=32 , _A : Tuple=2 , _A : Any=4 , _A : Dict=37 , _A : int=0.1 , _A : List[str]=0.1 , _A : Optional[int]=512 , _A : int=0.0_2 , _A : List[Any]=0 , _A : Union[str, Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Tuple = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[int] = use_input_mask UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : Dict = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = projection_dim UpperCAmelCase__ : Tuple = num_hidden_layers UpperCAmelCase__ : List[Any] = num_attention_heads UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : List[Any] = dropout UpperCAmelCase__ : int = attention_dropout UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[str] = scope UpperCAmelCase__ : Union[str, Any] = bos_token_id def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Tuple = None if self.use_input_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase__ : Optional[int] = input_mask.numpy() UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = input_mask.shape UpperCAmelCase__ : Dict = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_A ): UpperCAmelCase__ : str = 1 UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : Optional[int] = self.get_config() return config, input_ids, tf.convert_to_tensor(_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Dict , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = TFBlipTextModel(config=_A ) UpperCAmelCase__ : Optional[int] = model(_A , attention_mask=_A , training=_A ) UpperCAmelCase__ : List[Any] = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase_ ( self 
: Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else () lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = BlipTextModelTester(self ) UpperCAmelCase__ : str = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' pass def lowercase_ ( self : int ): '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def lowercase_ ( self : str ): '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Any = TFBlipTextModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def lowercase_ ( self : Any , _A : Tuple=True ): '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
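# --- Illustrative usage sketch --------------------------------------------------------
# Instantiating the same tiny text encoder the tester above builds, without pretrained
# weights; all sizes are the tester's defaults, so this runs quickly on CPU.
import tensorflow as tf
from transformers import BlipTextConfig
from transformers.models.blip.modeling_tf_blip import TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = TFBlipTextModel(config)
outputs = model(tf.constant([[1, 2, 3, 4]]), training=False)
print(outputs.last_hidden_state.shape)  # (1, 4, 32)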
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of an OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
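# --- Illustrative usage sketch --------------------------------------------------------
# Building a model from this config. The reduced sizes are assumptions to keep the
# example light; the defaults reproduce the original 12-layer GPT.
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=2, n_head=4, n_embd=64)
model = OpenAIGPTModel(config)  # randomly initialised, no download needed
print(model.config.max_position_embeddings)  # 512, aliased to n_positions via attribute_map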
"""simple docstring""" import math import tensorflow as tf from packaging import version def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[str] = tf.convert_to_tensor(__UpperCamelCase ) __lowercase : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[int] = tf.convert_to_tensor(__UpperCamelCase ) __lowercase : str = tf.cast(math.pi , x.dtype ) __lowercase : Dict = tf.cast(0.044_715 , x.dtype ) __lowercase : Any = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) )) return x * cdf def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Tuple = tf.convert_to_tensor(__UpperCamelCase ) return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = tf.convert_to_tensor(__UpperCamelCase ) __lowercase : int = tf.cast(0.044_715 , x.dtype ) __lowercase : Union[str, Any] = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase ) __lowercase : Dict = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def __UpperCAmelCase ( __UpperCamelCase ): return tf.clip_by_value(_gelu(__UpperCamelCase ) , -10 , 10 ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=-1 ): __lowercase ,__lowercase : str = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase ) return a * tf.math.sigmoid(__UpperCamelCase ) if version.parse(tf.version.VERSION) >= version.parse('2.4'): def __UpperCAmelCase ( __UpperCamelCase ): return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase ) a_ = tf.keras.activations.gelu a_ = approximate_gelu_wrap else: a_ = _gelu a_ = _gelu_new a_ = { 'gelu': gelu, 'gelu_10': gelu_aa, 'gelu_fast': gelu_fast, 'gelu_new': gelu_new, 'glu': glu, 'mish': mish, 'quick_gelu': quick_gelu, 'relu': tf.keras.activations.relu, 'sigmoid': tf.keras.activations.sigmoid, 'silu': tf.keras.activations.swish, 'swish': tf.keras.activations.swish, 'tanh': tf.keras.activations.tanh, } def __UpperCAmelCase ( __UpperCamelCase ): if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
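# --- Illustrative usage sketch --------------------------------------------------------
# The plain inference path the tests above cover: resize, normalize, and pad one image.
# The fixture path is the one the integration tests use; the printed shape matches their
# expectation for this image.
from PIL import Image
from transformers import DeformableDetrImageProcessor

image_processor = DeformableDetrImageProcessor()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = image_processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])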
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A = { """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""], """tokenization_deberta""": ["""DebertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = ["""DebertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """DebertaForMaskedLM""", """DebertaForQuestionAnswering""", """DebertaForSequenceClassification""", """DebertaForTokenClassification""", """DebertaModel""", """DebertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDebertaForMaskedLM""", """TFDebertaForQuestionAnswering""", """TFDebertaForSequenceClassification""", """TFDebertaForTokenClassification""", """TFDebertaModel""", """TFDebertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s, and 2s in a single pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Swap the red element into the low region.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Swap the blue element into the high region.
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
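# --- Worked example --------------------------------------------------------------------
# One pass, O(n) time, O(1) extra space; the low/mid/high pointers partition the list
# into a red zone, a white zone, and a blue zone.
assert dutch_national_flag_sort([2, 0, 1, 1, 0, 2]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []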
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = int(snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = t // 36_00, (t // 60) % 60, t % 60 return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}""" def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int=3_00 ) -> str: '''simple docstring''' return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = "<table border=\"1\" class=\"dataframe\">\n" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: UpperCAmelCase_ = f"""{elt:.6f}""" if isinstance(snake_case_ , snake_case_ ) else str(snake_case_ ) html_code += f""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __A : a__ : Dict = 5 a__ : Any = 0.2 def __init__(self : Optional[Any] , __a : int , __a : Optional[str] = None , __a : bool = True , __a : Optional["NotebookTrainingTracker"] = None , __a : int = 300 , ): UpperCAmelCase_ = total UpperCAmelCase_ = "" if prefix is None else prefix UpperCAmelCase_ = leave UpperCAmelCase_ = parent UpperCAmelCase_ = width UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : str , __a : int , __a : bool = False , __a : str = None ): UpperCAmelCase_ = value if comment is not None: UpperCAmelCase_ = comment if self.last_value is None: UpperCAmelCase_ = UpperCAmelCase_ = time.time() UpperCAmelCase_ = UpperCAmelCase_ = value UpperCAmelCase_ = UpperCAmelCase_ = None UpperCAmelCase_ = self.warmup UpperCAmelCase_ = 1 self.update_bar(__a ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 UpperCAmelCase_ = time.time() UpperCAmelCase_ = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: UpperCAmelCase_ = self.elapsed_time / (value - self.start_value) else: UpperCAmelCase_ = None if value >= self.total: UpperCAmelCase_ = self.total UpperCAmelCase_ = None if not self.leave: self.close() elif self.average_time_per_item is not None: UpperCAmelCase_ = self.average_time_per_item * (self.total - value) self.update_bar(__a ) UpperCAmelCase_ = value UpperCAmelCase_ = current_time if self.average_time_per_item is None: UpperCAmelCase_ = 1 else: UpperCAmelCase_ = max(int(self.update_every / self.average_time_per_item ) , 1 ) def _lowercase (self : Optional[int] , __a : Tuple , __a : List[Any]=None ): UpperCAmelCase_ = " " * (len(str(self.total ) ) - len(str(__a ) )) + str(__a ) if self.elapsed_time is None: UpperCAmelCase_ = f"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: UpperCAmelCase_ = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: UpperCAmelCase_ = ( f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" f""" {format_time(self.predicted_remaining )}""" ) self.label += f""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]""" self.display() def _lowercase (self : Dict ): UpperCAmelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: UpperCAmelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=__a ) else: self.output.update(disp.HTML(self.html_code ) ) def _lowercase (self : Optional[Any] ): if self.parent is None and self.output is not None: self.output.update(disp.HTML("" ) ) class __A ( UpperCamelCase__ ): def __init__(self : List[Any] , __a : List[Any] , __a : Dict=None ): super().__init__(__a ) UpperCAmelCase_ = None if column_names is None else [column_names] UpperCAmelCase_ = None def _lowercase (self : str ): UpperCAmelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: UpperCAmelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=__a ) else: self.output.update(disp.HTML(self.html_code ) ) def _lowercase (self : List[str] , __a : Union[str, Any] ): if self.inner_table is None: UpperCAmelCase_ = [list(values.keys() ), list(values.values() )] else: UpperCAmelCase_ = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__a ) UpperCAmelCase_ = columns self.inner_table.append([values[c] for c in columns] ) def _lowercase (self : Dict , __a : Optional[int] , __a : int=None , __a : int=300 ): UpperCAmelCase_ = NotebookProgressBar(__a , prefix=__a , parent=self , width=__a ) return self.child_bar def _lowercase (self : Dict ): UpperCAmelCase_ = None self.display() class __A ( UpperCamelCase__ ): def __init__(self : Dict ): UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = False def _lowercase (self : int , __a : Optional[Any] , __a : Any , __a : List[str] , **__a : Union[str, Any] ): UpperCAmelCase_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 
UpperCAmelCase_ = [self.first_column] + ["Training Loss"] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("Validation Loss" ) UpperCAmelCase_ = NotebookTrainingTracker(state.max_steps , __a ) def _lowercase (self : Union[str, Any] , __a : str , __a : Dict , __a : Dict , **__a : List[Any] ): UpperCAmelCase_ = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) UpperCAmelCase_ = False def _lowercase (self : List[Any] , __a : str , __a : Optional[Any] , __a : Optional[int] , __a : Tuple=None , **__a : List[str] ): if not has_length(__a ): return if self.prediction_bar is None: if self.training_tracker is not None: UpperCAmelCase_ = self.training_tracker.add_child(len(__a ) ) else: UpperCAmelCase_ = NotebookProgressBar(len(__a ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def _lowercase (self : int , __a : List[str] , __a : Union[str, Any] , __a : Optional[int] , **__a : Tuple ): if self.prediction_bar is not None: self.prediction_bar.close() UpperCAmelCase_ = None def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Any , __a : List[Any] , __a : Tuple=None , **__a : List[Any] ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: UpperCAmelCase_ = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy UpperCAmelCase_ = state.global_step self.training_tracker.write_line(__a ) def _lowercase (self : Optional[int] , __a : str , __a : Tuple , __a : Optional[int] , __a : Union[str, Any]=None , **__a : List[Any] ): if self.training_tracker is not None: UpperCAmelCase_ = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history ): if "loss" in log: UpperCAmelCase_ = log["loss"] break if self.first_column == "Epoch": UpperCAmelCase_ = int(state.epoch ) else: UpperCAmelCase_ = state.global_step UpperCAmelCase_ = "eval" for k in metrics: if k.endswith("_loss" ): UpperCAmelCase_ = re.sub(r"\_loss$" , "" , __a ) UpperCAmelCase_ = metrics.pop("total_flos" , __a ) UpperCAmelCase_ = metrics.pop("epoch" , __a ) UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_runtime""" , __a ) UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , __a ) UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , __a ) UpperCAmelCase_ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , __a ) for k, v in metrics.items(): if k == f"""{metric_key_prefix}_loss""": UpperCAmelCase_ = v else: UpperCAmelCase_ = k.split("_" ) UpperCAmelCase_ = " ".join([part.capitalize() for part in splits[1:]] ) UpperCAmelCase_ = v self.training_tracker.write_line(__a ) self.training_tracker.remove_child() UpperCAmelCase_ = None # Evaluation takes a long time so we should force the next update. UpperCAmelCase_ = True def _lowercase (self : Dict , __a : Dict , __a : Any , __a : Optional[Any] , **__a : Union[str, Any] ): self.training_tracker.update( state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=__a ) UpperCAmelCase_ = None
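# --- Illustrative usage sketch --------------------------------------------------------
# Driving the progress bar defined above directly; the class corresponds to
# NotebookProgressBar in transformers.utils.notebook. Inside Jupyter it renders a live
# HTML bar; in a plain interpreter the timing arithmetic still runs, only the display
# differs. The loop body is a stand-in for real work.
import time

from transformers.utils.notebook import NotebookProgressBar

bar = NotebookProgressBar(50, prefix="Demo")
for step in range(1, 51):
    time.sleep(0.02)
    bar.update(step, comment=f"step {step}")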
78
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
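A quick sanity check for the generator and solution above (the first six primes are 2, 3, 5, 7, 11, 13, so the 6th prime is 13):

import itertools

assert list(itertools.islice(prime_generator(), 6)) == [2, 3, 5, 7, 11, 13]
assert solution(6) == 13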
2
0
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base85 bytes."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a: bytes) -> str:
    """Decode Base85 bytes back into a UTF-8 string."""
    return base64.b85decode(a).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
79
import collections import os import re from pathlib import Path UpperCAmelCase_ = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase_ = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase_ = re.compile(r"""^\s*else:""") def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any: if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any: with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any: def find_duplicates(_snake_case :Any ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = '''base imports''' if key == '''none''' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> int: _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '''__init__.py''' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('''\n'''.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('''\n\n'''.join(_snake_case ) ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '''.''' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_snake_case ) return submodules UpperCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: # This is to make sure the transformers module 
imported is the one in the repo. from transformers.utils import direct_transformers_import _A = direct_transformers_import(_snake_case ) _A = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f: _A = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) ) _A = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_snake_case ) > 0: _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F'''{list_of_modules}\n''' '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
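To make the convention being enforced concrete, here is a minimal sketch of an __init__.py layout that the checker above should accept (valid only inside a package); the module and object names (configuration_foo, FooConfig) are hypothetical:

from typing import TYPE_CHECKING

_import_structure = {
    "configuration_foo": ["FooConfig"],
}

if TYPE_CHECKING:
    # mirrors the _import_structure key/value above, so both halves define the same objects
    from .configuration_foo import FooConfig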
2
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
80
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
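For context, a minimal usage sketch of the pipeline class above, assuming it is registered under the usual transformers "image-to-text" task; the checkpoint name and image path are illustrative assumptions, not taken from this file:

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("photo.jpg", max_new_tokens=20))  # "photo.jpg" is a hypothetical local image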
2
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _snake_case : Any = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: __snake_case : int = 1_9_2 __snake_case : List[Any] = 7_6_8 __snake_case : Union[str, Any] = 1_2 __snake_case : Optional[Any] = 3 __snake_case : Any = [8_0_0, 1_3_3_3] __snake_case : List[str] = False elif yolos_name == "yolos_s_dWr": __snake_case : List[str] = 3_3_0 __snake_case : Dict = 1_4 __snake_case : List[str] = 6 __snake_case : Union[str, Any] = 1_3_2_0 elif "yolos_s" in yolos_name: __snake_case : Union[str, Any] = 3_8_4 __snake_case : Dict = 1_5_3_6 __snake_case : int = 1_2 __snake_case : Optional[Any] = 6 elif "yolos_b" in yolos_name: __snake_case : Union[str, Any] = [8_0_0, 1_3_4_4] __snake_case : Optional[int] = 9_1 __snake_case : Any = "huggingface/label-files" __snake_case : Optional[int] = "coco-detection-id2label.json" __snake_case : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) ) __snake_case : Dict = {int(__lowerCamelCase ): v for k, v in idalabel.items()} __snake_case : str = idalabel __snake_case : int = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __snake_case : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) __snake_case : int = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __snake_case : Any = in_proj_weight[: config.hidden_size, :] __snake_case : Dict = in_proj_bias[: config.hidden_size] __snake_case : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __snake_case : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __snake_case : List[Any] = in_proj_weight[-config.hidden_size :, :] __snake_case : Union[str, Any] = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( __lowerCamelCase ): if "backbone" in name: __snake_case : Any = name.replace("backbone" , "vit" ) if "cls_token" in name: __snake_case : Any = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: __snake_case : Optional[Any] = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: __snake_case : Optional[int] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: __snake_case : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: __snake_case : List[str] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: __snake_case : Dict = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: __snake_case : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: __snake_case : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: __snake_case : Union[str, Any] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: __snake_case : List[str] = name.replace("norm2" , 
"layernorm_after" ) if "mlp.fc1" in name: __snake_case : int = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: __snake_case : List[str] = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: __snake_case : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: __snake_case : List[Any] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: __snake_case : Union[str, Any] = name.replace("vit.norm" , "vit.layernorm" ) return name def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): for key in orig_state_dict.copy().keys(): __snake_case : str = orig_state_dict.pop(__lowerCamelCase ) if "qkv" in key: __snake_case : Tuple = key.split("." ) __snake_case : List[Any] = int(key_split[2] ) __snake_case : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: __snake_case : Optional[Any] = val[:dim, :] __snake_case : Union[str, Any] = val[ dim : dim * 2, : ] __snake_case : List[Any] = val[-dim:, :] else: __snake_case : Tuple = val[:dim] __snake_case : Optional[int] = val[dim : dim * 2] __snake_case : Tuple = val[-dim:] else: __snake_case : Tuple = val return orig_state_dict def lowerCAmelCase_ ( ): __snake_case : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ): __snake_case : str = get_yolos_config(__lowerCamelCase ) # load original state_dict __snake_case : int = torch.load(__lowerCamelCase , map_location="cpu" )["model"] # load 🤗 model __snake_case : str = YolosForObjectDetection(__lowerCamelCase ) model.eval() __snake_case : Dict = convert_state_dict(__lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor __snake_case : Dict = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2 __snake_case : str = YolosImageProcessor(format="coco_detection" , size=__lowerCamelCase ) __snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) __snake_case : Optional[Any] = model(**__lowerCamelCase ) __snake_case , __snake_case : Tuple = outputs.logits, outputs.pred_boxes __snake_case , __snake_case : Tuple = None, None if yolos_name == "yolos_ti": __snake_case : List[str] = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) __snake_case : str = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": __snake_case : List[str] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) __snake_case : Union[str, Any] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": __snake_case : Tuple = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) __snake_case : Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": __snake_case : 
Optional[int] = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) __snake_case : Dict = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": __snake_case : List[str] = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) __snake_case : Optional[Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F'Unknown yolos_name: {yolos_name}' ) assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__lowerCamelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: __snake_case : Dict = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) __snake_case : Optional[Any] = model_mapping[yolos_name] image_processor.push_to_hub(__lowerCamelCase , organization="hustvl" ) model.push_to_hub(__lowerCamelCase , organization="hustvl" ) if __name__ == "__main__": _snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _snake_case : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
2
0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple=0 ) -> Dict: '''simple docstring''' UpperCAmelCase_ = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) ) UpperCAmelCase_ = np.random.RandomState(_UpperCAmelCase ) UpperCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase__ ( self : Dict ) -> str: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self : Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) # warmup pass to apply optimizations UpperCAmelCase_ = pipe(**self.get_dummy_inputs() ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self : Any ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self : List[str] ) -> Any: '''simple docstring''' UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = self.get_dummy_inputs() UpperCAmelCase_ = pipe(**_UpperCAmelCase ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase_ = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' @property def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = ort.SessionOptions() UpperCAmelCase_ = False return options def lowercase__ ( self : Dict ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) UpperCAmelCase_ = init_image.resize((768, 512) ) # using the PNDM scheduler by default UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = "A fantasy landscape, trending on artstation" UpperCAmelCase_ = np.random.RandomState(0 ) UpperCAmelCase_ = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 2e-2 def lowercase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) UpperCAmelCase_ = init_image.resize((768, 512) ) UpperCAmelCase_ = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) UpperCAmelCase_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase_ = "A fantasy landscape, trending on artstation" UpperCAmelCase_ = np.random.RandomState(0 ) UpperCAmelCase_ = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
82
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
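The kruskal imported above is not shown in this file; the following is a minimal sketch consistent with the (num_nodes, edges) call signature and the [node, node, weight] edge format used by the test — an assumption, not the actual graphs.minimum_spanning_tree_kruskal implementation:

def kruskal(num_nodes, edges):
    # sort edges by weight, then greedily keep any edge that joins two components
    edges = sorted(edges, key=lambda edge: edge[2])
    parent = list(range(num_nodes))

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path halving
            i = parent[i]
        return i

    mst = []
    for u, v, weight in edges:
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst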
2
0
"""simple docstring""" def snake_case_ ( A_ : List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = 0 _lowerCamelCase : Union[str, Any] = len(A_ ) for i in range(n - 1 ): for j in range(i + 1, A_ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def snake_case_ ( A_ : List[str] ): '''simple docstring''' if len(A_ ) <= 1: return arr, 0 _lowerCamelCase : int = len(A_ ) // 2 _lowerCamelCase : int = arr[0:mid] _lowerCamelCase : int = arr[mid:] _lowerCamelCase , _lowerCamelCase : str = count_inversions_recursive(A_ ) _lowerCamelCase , _lowerCamelCase : int = count_inversions_recursive(A_ ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = _count_cross_inversions(A_, A_ ) _lowerCamelCase : Optional[Any] = inversion_p + inversions_q + cross_inversions return c, num_inversions def snake_case_ ( A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Tuple = [] _lowerCamelCase : List[str] = 0 while i < len(A_ ) and j < len(A_ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(A_ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(A_ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Tuple = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCamelCase : List[str] = count_inversions_bf(A_ ) _lowerCamelCase , _lowerCamelCase : List[str] = count_inversions_recursive(A_ ) assert num_inversions_bf == num_inversions_recursive == 8 print('''number of inversions = ''', A_ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCamelCase : List[Any] = count_inversions_bf(A_ ) _lowerCamelCase , _lowerCamelCase : List[Any] = count_inversions_recursive(A_ ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''', A_ ) # an empty list should also have zero inversions _lowerCamelCase : str = [] _lowerCamelCase : Optional[int] = count_inversions_bf(A_ ) _lowerCamelCase , _lowerCamelCase : Any = count_inversions_recursive(A_ ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''', A_ ) if __name__ == "__main__": main()
83
def aliquot_sum(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
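Two worked values for the function above, checkable by hand (the proper divisors of 15 are 1, 3 and 5; those of 6 are 1, 2 and 3):

assert aliquot_sum(15) == 9
assert aliquot_sum(6) == 6  # 6 is perfect: it equals the sum of its proper divisors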
2
0
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
84
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
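The rolling-hash step above can be checked in isolation: dropping the leading character and appending the next one must match hashing the shifted window from scratch. A small self-check built on the same constants:

def _hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

power = pow(alphabet_size, len("abc") - 1, modulus)
rolled = ((_hash("abc") - ord("a") * power) * alphabet_size + ord("d")) % modulus
assert rolled == _hash("bcd")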
2
0
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class snake_case ( UpperCamelCase_ ): def __init__( self : List[str] , **a_ : Tuple )-> List[str]: """simple docstring""" super().__init__(**a_ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Union[str, Any] , a_ : Union[str, List[str], "Image", List["Image"]] , **a_ : List[str] )-> Optional[Any]: """simple docstring""" return super().__call__(a_ , **a_ ) def __lowercase( self : Dict , **a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = {} if "candidate_labels" in kwargs: SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: SCREAMING_SNAKE_CASE__ : str = kwargs['hypothesis_template'] return preprocess_params, {}, {} def __lowercase( self : str , a_ : int , a_ : Optional[Any]=None , a_ : int="This is a photo of {}." )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = load_image(a_ ) SCREAMING_SNAKE_CASE__ : Any = self.image_processor(images=[image] , return_tensors=self.framework ) SCREAMING_SNAKE_CASE__ : str = candidate_labels SCREAMING_SNAKE_CASE__ : Any = [hypothesis_template.format(a_ ) for x in candidate_labels] SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer(a_ , return_tensors=self.framework , padding=a_ ) SCREAMING_SNAKE_CASE__ : int = [text_inputs] return inputs def __lowercase( self : Union[str, Any] , a_ : List[str] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = model_inputs.pop('candidate_labels' ) SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , a_ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_inputs[0] else: # Batching case. 
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_inputs[0][0] SCREAMING_SNAKE_CASE__ : Any = self.model(**a_ , **a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def __lowercase( self : Optional[int] , a_ : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = model_outputs.pop('candidate_labels' ) SCREAMING_SNAKE_CASE__ : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": SCREAMING_SNAKE_CASE__ : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ : str = probs.tolist() if not isinstance(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[str] = [scores] elif self.framework == "tf": SCREAMING_SNAKE_CASE__ : Optional[int] = stable_softmax(a_ , axis=-1 ) SCREAMING_SNAKE_CASE__ : int = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) SCREAMING_SNAKE_CASE__ : List[Any] = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(a_ , a_ ) , key=lambda a_ : -x[0] ) ] return result
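A minimal usage sketch of the zero-shot pipeline above, assuming it is registered as the standard transformers "zero-shot-image-classification" task; the CLIP checkpoint and image path are illustrative assumptions:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
print(classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}."))  # "cat.png" is a hypothetical local image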
85
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } UpperCAmelCase_ = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = """</w>""" UpperCAmelCase_ = """@@ """ def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs # Speech2Text2 has no max input length UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict: super().__init__( unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , ) _A = do_lower_case with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle: _A = json.load(__lowerCAmelCase ) _A = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'''No merges files provided. 
{self.__class__.__name__} can only be used for decoding.''' ) _A = None _A = None else: with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle: _A = merges_handle.read().split('''\n''' )[:-1] _A = [tuple(merge.split()[:2] ) for merge in merges] _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _A = {} @property def snake_case_ ( self : List[str] ) -> int: return len(self.decoder ) def snake_case_ ( self : Dict ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__lowerCAmelCase ): try: _A = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__lowerCAmelCase ) _A = new_word if len(__lowerCAmelCase ) == 1: break else: _A = get_pairs(__lowerCAmelCase ) _A = ''' '''.join(__lowerCAmelCase ) if word == "\n " + BPE_TOKEN_MERGES: _A = '''\n''' + BPE_TOKEN_MERGES if word.endswith(__lowerCAmelCase ): _A = word.replace(__lowerCAmelCase , '''''' ) _A = word.replace(''' ''' , __lowerCAmelCase ) _A = word return word def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: if self.bpe_ranks is None: raise ValueError( '''This tokenizer was instantiated without a `merges.txt` file, so''' ''' that it can only be used for decoding, not for encoding.''' '''Make sure to provide `merges.txt` file at instantiation to enable ''' '''encoding.''' ) if self.do_lower_case: _A = text.lower() _A = text.split() _A = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int: return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str: _A = self.decoder.get(__lowerCAmelCase , self.unk_token ) return result def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str: _A = ''' '''.join(__lowerCAmelCase ) # make sure @@ tokens are concatenated _A = ''''''.join(string.split(__lowerCAmelCase ) ) return string def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' ) _A = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCAmelCase , '''w''' , 
encoding='''utf-8''' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _A = token_index writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' ) index += 1 return (vocab_file, merges_file)
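A minimal usage sketch for the tokenizer above, assuming the facebook/s2t-wav2vec2-large-en-de checkpoint named in the vocab map ships both vocab and merges files (if merges.txt is missing, only decoding is available, as the code above notes):

from transformers import Speech2Text2Tokenizer

tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
print(tokenizer.decode(tokenizer("hallo welt").input_ids))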
2
0
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a :Tuple = logging.get_logger(__name__) __a :int = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _a ( snake_case_ ): """simple docstring""" def __init__( self : str , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=None , *UpperCAmelCase : int , **UpperCAmelCase : List[str] ): super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if config is None: assert isinstance(self.model , UpperCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) A_ = self.model.config else: A_ = config A_ = data_args A_ = self.config.tgt_vocab_size if isinstance(self.config , UpperCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' " padding."
) if self.args.label_smoothing == 0: A_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss A_ = label_smoothed_nll_loss def __A ( self : Dict , UpperCAmelCase : int ): if self.optimizer is None: A_ = ["bias", "LayerNorm.weight"] A_ = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] A_ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: A_ = Adafactor A_ = {"scale_parameter": False, "relative_step": False} else: A_ = AdamW A_ = { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } A_ = self.args.learning_rate if self.sharded_ddp: A_ = OSS( params=UpperCAmelCase , optim=UpperCAmelCase , **UpperCAmelCase , ) else: A_ = optimizer_cls(UpperCAmelCase , **UpperCAmelCase ) if self.lr_scheduler is None: A_ = self._get_lr_scheduler(UpperCAmelCase ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." ) def __A ( self : Tuple , UpperCAmelCase : List[Any] ): A_ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": A_ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": A_ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: A_ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCAmelCase ) return scheduler def __A ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : str ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token A_ = model(**UpperCAmelCase , use_cache=UpperCAmelCase )[0] A_ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models A_ , A_ = model(**UpperCAmelCase , labels=UpperCAmelCase , use_cache=UpperCAmelCase )[:2] else: # compute label smoothed loss A_ = model(**UpperCAmelCase , use_cache=UpperCAmelCase )[0] A_ = torch.nn.functional.log_softmax(UpperCAmelCase , dim=-1 ) A_ , A_ = self.loss_fn(UpperCAmelCase , UpperCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def __A ( self : int , UpperCAmelCase : str , UpperCAmelCase : int ): A_ = inputs.pop("labels" ) A_ , A_ = self._compute_loss(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return loss def __A ( self : Optional[Any] , UpperCAmelCase : nn.Module , UpperCAmelCase : Dict[str, Union[torch.Tensor, Any]] , UpperCAmelCase : bool , UpperCAmelCase : Optional[List[str]] = None , ): A_ = self._prepare_inputs(UpperCAmelCase ) A_ = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: A_ = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **UpperCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: A_ = self._pad_tensors_to_max_len(UpperCAmelCase , gen_kwargs["max_length"] ) A_ = inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data A_ , A_ = self._compute_loss(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) A_ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: A_ = self._pad_tensors_to_max_len(UpperCAmelCase , gen_kwargs["max_length"] ) return (loss, logits, labels) def __A ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ): # If PAD token is not defined at least EOS token has to be defined A_ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" f''' padded to `max_length`={max_length}''' ) A_ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) A_ = tensor return padded_tensor
86
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar("""T""") def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (position - 1) // 2 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 1 def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : Optional[int] ) -> None: _A = [] _A = {} _A = 0 def __len__( self : str ) -> int: return self.elements def __repr__( self : Optional[int] ) -> str: return str(self.heap ) def snake_case_ ( self : str ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _A = self.elements self.elements += 1 self._bubble_up(__lowerCAmelCase ) def snake_case_ ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _A , _A = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _A , _A = self.heap[0] self._bubble_down(__lowerCAmelCase ) return elem def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Update the weight of the given key _A = self.position_map[elem] _A = (elem, weight) if position > 0: _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) else: self._bubble_down(__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _A = self.position_map[elem] if curr_pos == 0: return None _A = get_parent_position(__lowerCAmelCase ) _A , _A = self.heap[curr_pos] _A , _A = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_up(__lowerCAmelCase ) return None def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _A = self.position_map[elem] _A , _A = self.heap[curr_pos] _A = get_child_left_position(__lowerCAmelCase ) _A = get_child_right_position(__lowerCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _A , _A = self.heap[child_left_position] _A , _A = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) if child_left_position < self.elements: _A , _A = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) else: return None if child_right_position < self.elements: _A , _A = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase ) return self._bubble_down(__lowerCAmelCase ) return None def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: # Swap the nodes at the given positions _A = self.heap[nodea_pos][0] _A = self.heap[nodea_pos][0] _A , _A = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) _A = nodea_pos _A = nodea_pos class lowerCamelCase__ ( Generic[T]): """simple docstring""" def __init__( self : str ) -> None: _A = {} _A = 0 def __repr__( self : str ) -> str: return str(self.connections ) def __len__( self : Dict ) -> int: return self.nodes def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _A = {} self.nodes += 1 def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowerCAmelCase ) self.add_node(__lowerCAmelCase ) _A = weight _A = weight def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]: _A = {node: maxsize for node in graph.connections} _A = {node: None for node in graph.connections} _A = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_snake_case , _snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _A = priority_queue.extract_min() _A = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node # running prim's algorithm while not priority_queue.is_empty(): _A = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _A = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_snake_case , dist[neighbour] ) _A = node return dist, parent
2
0
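The Prim's implementation above maintains a hand-rolled decrease-key priority queue. As an independent sanity check, here is a minimal sketch of the same algorithm using only the standard library (heapq with lazy deletion instead of `update_key`); the graph below is a made-up example, not data from the row above:

import heapq


def prim_mst_weight(graph: dict) -> int:
    # graph: node -> {neighbour: weight}; returns total weight of a spanning tree
    start = next(iter(graph))
    visited = {start}
    # lazy-deletion priority queue of (weight, node) edges leaving the tree
    heap = [(w, n) for n, w in graph[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(graph):
        weight, node = heapq.heappop(heap)
        if node in visited:  # stale entry; skip it instead of decreasing a key
            continue
        visited.add(node)
        total += weight
        for neighbour, w in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (w, neighbour))
    return total


example = {
    "a": {"b": 3, "c": 15},
    "b": {"a": 3, "c": 10},
    "c": {"a": 15, "b": 10},
}
assert prim_mst_weight(example) == 13  # edges a-b (3) and b-c (10)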
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''philschmid/bart-large-cnn-samsum''' UpperCAmelCase__ = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) UpperCAmelCase__ = '''summarizer''' UpperCAmelCase__ = AutoTokenizer UpperCAmelCase__ = AutoModelForSeqaSeqLM UpperCAmelCase__ = ['''text'''] UpperCAmelCase__ = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[Any]) ->List[Any]: '''simple docstring''' return self.pre_processor(UpperCAmelCase__ , return_tensors='''pt''' , truncation=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' return self.model.generate(**UpperCAmelCase__)[0] def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : str) ->str: '''simple docstring''' return self.pre_processor.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__)
87
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = """▁""" UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} UpperCAmelCase_ = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4} class lowerCamelCase__ ( _A): """simple docstring""" a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Tuple = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) _A = vocab_file _A = monolingual_vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _A = {} _A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = cnt cnt += 1 with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _A = line.strip().split()[0] _A = len(self.fairseq_tokens_to_ids ) if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids: _A = len(self.fairseq_tokens_to_ids ) _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> List[Any]: _A = self.__dict__.copy() _A = None _A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case_ ( self : List[Any] , __lowerCAmelCase : 
List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case_ ( self : Optional[int] ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def snake_case_ ( self : Dict ) -> Optional[Any]: _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]: return self.fairseq_ids_to_tokens[index] def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple: _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
2
0
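The BartPho tokenizer above builds sentence pairs as `<s> A </s></s> B </s>` with all-zero segment ids. A tiny pure-Python sketch of that special-token layout (the ids below are placeholders, not real vocabulary indices):

from __future__ import annotations

CLS, SEP = 0, 2  # placeholder ids standing in for <s> and </s>


def build_inputs_with_special_tokens(ids_a: list, ids_b: list | None = None) -> list:
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    # sentence pairs get a double separator between the two segments
    return [CLS] + ids_a + [SEP] + [SEP] + ids_b + [SEP]


def create_token_type_ids(ids_a: list, ids_b: list | None = None) -> list:
    # as in the tokenizer above, every position gets segment id 0
    return [0] * len(build_inputs_with_special_tokens(ids_a, ids_b))


assert build_inputs_with_special_tokens([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]
assert create_token_type_ids([5, 6], [7]) == [0] * 7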
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
2
0
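A quick self-contained check of what the iterative DFS above produces on its sample graph, recording visit order instead of just the explored set (same stack discipline and `reversed` adjacency trick):

def dfs_order(graph: dict, start: str) -> list:
    order, explored, stack = [], set(), [start]
    while stack:
        v = stack.pop()
        if v in explored:
            continue
        explored.add(v)
        order.append(v)
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return order


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}
print(dfs_order(G, "A"))  # ['A', 'B', 'D', 'E', 'F', 'C', 'G']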
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
89
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
2
0
'''simple docstring'''

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
90
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Any = "xlnet" a__ : Dict = ["mems"] a__ : List[str] = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple: _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs['''use_cache'''] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]: logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
2
0
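Both configs above override serialization-sensitive behaviour (a nested backbone config, attribute maps). A minimal sketch of the nested to_dict/from_dict round trip that pattern relies on, using toy classes rather than the real transformers ones:

import copy


class ToyBackboneConfig:
    model_type = "toy-backbone"

    def __init__(self, depth=4):
        self.depth = depth

    def to_dict(self):
        return {"model_type": self.model_type, "depth": self.depth}


class ToyUperNetLikeConfig:
    model_type = "toy-upernet"

    def __init__(self, backbone_config=None):
        if isinstance(backbone_config, dict):
            # rebuild the nested config object from its serialized form
            backbone_config = ToyBackboneConfig(depth=backbone_config["depth"])
        self.backbone_config = backbone_config or ToyBackboneConfig()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


config = ToyUperNetLikeConfig()
restored = ToyUperNetLikeConfig(backbone_config=config.to_dict()["backbone_config"])
assert restored.backbone_config.depth == config.backbone_config.depth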
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
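The base16 helpers above can be cross-checked against the standard library, which implements the same RFC 3548 alphabet:

import base64

payload = b"Hello World!"
encoded = base64.b16encode(payload).decode("ascii")
assert encoded == "48656C6C6F20576F726C6421"
assert base64.b16decode(encoded) == payload
# like the hand-rolled decoder above, the stdlib rejects lowercase hex by default
try:
    base64.b16decode(encoded.lower())
except Exception:
    pass  # raises binascii.Error unless casefold=True is passed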
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
2
0
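For the arithmetic-series helpers above, the mean can also be checked against the closed form (first + last) / 2, which holds exactly for any arithmetic progression:

series = [2, 4, 6, 8, 10]
# mean of an arithmetic progression equals the average of its endpoints
assert sum(series) / len(series) == (series[0] + series[-1]) / 2 == 6.0
# a constant pairwise difference is exactly what is_arithmetic_series checks
diffs = {b - a for a, b in zip(series, series[1:])}
assert diffs == {2}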
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :int = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) lowerCAmelCase__ :int = [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] return object_detector, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = object_detector(examples[0] , threshold=0.0 ) lowerCAmelCase__ :Optional[Any] = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { 'score': ANY(__UpperCAmelCase ), 'label': ANY(__UpperCAmelCase ), 'box': {'xmin': ANY(__UpperCAmelCase ), 'ymin': ANY(__UpperCAmelCase ), 'xmax': ANY(__UpperCAmelCase ), 'ymax': ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) lowerCAmelCase__ :str = object_detector( './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, {'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}}, {'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, ] , ) lowerCAmelCase__ :Any = object_detector( [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 
0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, {'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}}, {'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, ] ] , ) @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :str = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ] , ) lowerCAmelCase__ :Any = object_detector( [ { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ], [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ], ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 0.2 lowerCAmelCase__ :Optional[Any] = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :Optional[Any] = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 
'couch'] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, ] , ) @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :Tuple = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :str = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, ] , )
93
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    quantum_register = QuantumRegister(number_of_qubits, "qr")
    classical_register = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(quantum_register, classical_register)
    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(quantum_register, classical_register)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
2
0
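Independent of qiskit, the QFT circuit above can be sanity-checked with the DFT matrix it implements; for a computational basis state, all outcome probabilities should be uniform (pure numpy sketch, no simulator needed):

import numpy as np

n_qubits = 3
dim = 2**n_qubits
# DFT matrix the QFT implements: F[j, k] = exp(2*pi*i*j*k / dim) / sqrt(dim)
indices = np.arange(dim)
qft_matrix = np.exp(2j * np.pi * np.outer(indices, indices) / dim) / np.sqrt(dim)
assert np.allclose(qft_matrix @ qft_matrix.conj().T, np.eye(dim))  # unitarity
state = np.zeros(dim)
state[0] = 1.0  # the |000> basis state the circuit starts from
probabilities = np.abs(qft_matrix @ state) ** 2
# mirrors the roughly uniform counts the qasm_simulator returns for this input
assert np.allclose(probabilities, 1 / dim)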
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = None class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : Tuple=1 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : Optional[int]="cls" , UpperCAmelCase : Tuple=False , UpperCAmelCase : Union[str, Any]=True , **UpperCAmelCase : str , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) lowercase : Union[str, Any] =project_dim lowercase : str =pooler_fn lowercase : Any =learn_encoder lowercase : Any =use_attention_mask class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = [r'''pooler''', r'''logit_scale'''] UpperCamelCase_ = [r'''position_ids''', r'''predictions.decoder.bias'''] UpperCamelCase_ = '''roberta''' UpperCamelCase_ = RobertaSeriesConfig def __init__( self : Dict , UpperCAmelCase : Optional[Any] ) -> str: '''simple docstring''' super().__init__(UpperCAmelCase ) lowercase : Dict =XLMRobertaModel(UpperCAmelCase ) lowercase : str =nn.Linear(config.hidden_size , config.project_dim ) lowercase : Dict =getattr(UpperCAmelCase , '''has_pre_transformation''' , UpperCAmelCase ) if self.has_pre_transformation: lowercase : Union[str, Any] =nn.Linear(config.hidden_size , config.project_dim ) lowercase : Any =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def A__ ( self : Union[str, Any] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , ) -> Union[str, Any]: '''simple docstring''' lowercase : int =return_dict if return_dict is not None else self.config.use_return_dict lowercase : int =self.base_model( input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , position_ids=UpperCAmelCase , head_mask=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_attentions=UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCAmelCase , ) if self.has_pre_transformation: lowercase : Union[str, Any] =outputs['''hidden_states'''][-2] lowercase : Optional[int] =self.pre_LN(UpperCAmelCase ) lowercase : Dict =self.transformation_pre(UpperCAmelCase ) return TransformationModelOutput( projection_state=UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: lowercase : Dict =self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=UpperCAmelCase , 
last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
94
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :str , _snake_case :Any , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]: for attribute in key.split('''.''' ): _A = getattr(_snake_case , _snake_case ) if weight_type is not None: _A = getattr(_snake_case , _snake_case ).shape else: _A = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any: _A = [] _A = fairseq_model.state_dict() _A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _A = True else: for key, mapped_key in MAPPING.items(): _A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _A = True if "*" in mapped_key: _A = name.split(_snake_case )[0].split('''.''' )[-2] _A = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: _A = '''weight_g''' elif "weight_v" in name: _A = '''weight_v''' elif "weight" in name: _A = '''weight''' elif "bias" in name: _A = '''bias''' else: _A = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any: _A = full_name.split('''conv_layers.''' )[-1] _A = name.split('''.''' ) _A = int(items[0] ) _A = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_snake_case ) def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple: _A = SEWConfig() if is_finetuned: _A = model.wav_encoder.wav_model.cfg else: _A = model.cfg _A = fs_config.conv_bias _A = eval(fs_config.conv_feature_layers ) _A = [x[0] for x in conv_layers] _A = [x[1] for x in conv_layers] _A = [x[2] for x in conv_layers] _A = '''gelu''' _A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' _A = 0.0 _A = fs_config.activation_fn.name _A = fs_config.encoder_embed_dim _A = 0.02 _A = fs_config.encoder_ffn_embed_dim _A = 1E-5 _A = fs_config.encoder_layerdrop _A = fs_config.encoder_attention_heads _A = fs_config.conv_pos_groups _A = fs_config.conv_pos _A = len(_snake_case ) _A = fs_config.encoder_layers _A = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: _A = model.cfg _A = fs_config.final_dropout _A = fs_config.layerdrop _A = fs_config.activation_dropout _A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 _A = fs_config.attention_dropout _A = fs_config.dropout_input _A = fs_config.dropout _A = fs_config.mask_channel_length _A = fs_config.mask_channel_prob _A = fs_config.mask_length _A = fs_config.mask_prob _A = '''Wav2Vec2FeatureExtractor''' _A = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]: if is_finetuned: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: _A = SEWConfig.from_pretrained(_snake_case ) else: _A = convert_config(model[0] , _snake_case ) _A = model[0].eval() _A = True if config.feat_extract_norm == '''layer''' else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) if is_finetuned: if dict_path: _A = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols ) _A = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _snake_case ) _A = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , 
word_delimiter_token='''|''' , do_lower_case=_snake_case , ) _A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) _A = SEWForCTC(_snake_case ) else: _A = SEWModel(_snake_case ) feature_extractor.save_pretrained(_snake_case ) recursively_load_weights(_snake_case , _snake_case , _snake_case ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
2
0
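The SEW conversion above renames fairseq state-dict keys through a MAPPING whose `*` stands for a layer index. A minimal sketch of that renaming step, with a toy mapping and key rather than a real checkpoint:

# toy mapping in the style of the conversion script: "*" is a layer-index wildcard
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def rename_key(name: str) -> str:
    for key, mapped_key in MAPPING.items():
        if key in name:
            # recover the layer index from the path segment before the matched key
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return name  # keys without a rule keep their fairseq name


assert (
    rename_key("w2v_model.encoder.layers.7.self_attn.k_proj.weight")
    == "encoder.layers.7.attention.k_proj"
)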
"""simple docstring""" import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = old_name if "patch_embed" in old_name: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = old_name.split("." ) if layer == "0": UpperCAmelCase_ : List[Any] = old_name.replace("0" ,"convolution1" ) elif layer == "1": UpperCAmelCase_ : List[str] = old_name.replace("1" ,"batchnorm_before" ) elif layer == "3": UpperCAmelCase_ : Union[str, Any] = old_name.replace("3" ,"convolution2" ) else: UpperCAmelCase_ : Union[str, Any] = old_name.replace("4" ,"batchnorm_after" ) if "network" in old_name and re.search(r"\d\.\d" ,A__ ): UpperCAmelCase_ : int = r"\b\d{2}\b" if bool(re.search(A__ ,A__ ) ): UpperCAmelCase_ : List[Any] = re.search(r"\d\.\d\d." ,A__ ).group() else: UpperCAmelCase_ : Tuple = re.search(r"\d\.\d." ,A__ ).group() if int(match[0] ) < 6: UpperCAmelCase_ : List[Any] = old_name.replace(A__ ,"" ) UpperCAmelCase_ : str = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] ) UpperCAmelCase_ : List[Any] = "intermediate_stages." + trimmed_name else: UpperCAmelCase_ : Dict = old_name.replace(A__ ,"" ) if int(match[2] ) < num_meta4D_last_stage: UpperCAmelCase_ : List[Any] = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] ) else: UpperCAmelCase_ : Dict = str(int(match[2] ) - num_meta4D_last_stage ) UpperCAmelCase_ : List[Any] = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index ) if "norm1" in old_name: UpperCAmelCase_ : Tuple = trimmed_name.replace("norm1" ,"layernorm1" ) elif "norm2" in old_name: UpperCAmelCase_ : Dict = trimmed_name.replace("norm2" ,"layernorm2" ) elif "fc1" in old_name: UpperCAmelCase_ : Tuple = trimmed_name.replace("fc1" ,"linear_in" ) elif "fc2" in old_name: UpperCAmelCase_ : str = trimmed_name.replace("fc2" ,"linear_out" ) UpperCAmelCase_ : str = "last_stage." + trimmed_name elif "network" in old_name and re.search(r".\d." ,A__ ): UpperCAmelCase_ : Any = old_name.replace("network" ,"intermediate_stages" ) if "fc" in new_name: UpperCAmelCase_ : Dict = new_name.replace("fc" ,"convolution" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): UpperCAmelCase_ : Optional[Any] = new_name.replace("norm1" ,"batchnorm_before" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): UpperCAmelCase_ : List[str] = new_name.replace("norm2" ,"batchnorm_after" ) if "proj" in new_name: UpperCAmelCase_ : Dict = new_name.replace("proj" ,"projection" ) if "dist_head" in new_name: UpperCAmelCase_ : Tuple = new_name.replace("dist_head" ,"distillation_classifier" ) elif "head" in new_name: UpperCAmelCase_ : Any = new_name.replace("head" ,"classifier" ) elif "patch_embed" in new_name: UpperCAmelCase_ : int = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": UpperCAmelCase_ : Tuple = new_name.replace("norm" ,"layernorm" ) UpperCAmelCase_ : Optional[int] = "efficientformer." + new_name else: UpperCAmelCase_ : Any = "efficientformer.encoder." 
+ new_name return new_name def snake_case ( A__ ,A__ ): for key in checkpoint.copy().keys(): UpperCAmelCase_ : int = checkpoint.pop(A__ ) UpperCAmelCase_ : Tuple = val return checkpoint def snake_case ( ): UpperCAmelCase_ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : int = Image.open(requests.get(A__ ,stream=A__ ).raw ) return image def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"] UpperCAmelCase_ : Any = EfficientFormerConfig.from_json_file(A__ ) UpperCAmelCase_ : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(A__ ) UpperCAmelCase_ : Optional[int] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] ) UpperCAmelCase_ : Optional[Any] = config.depths[-1] - config.num_metaad_blocks + 1 UpperCAmelCase_ : List[str] = convert_torch_checkpoint(A__ ,A__ ) model.load_state_dict(A__ ) model.eval() UpperCAmelCase_ : Any = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image UpperCAmelCase_ : Tuple = prepare_img() UpperCAmelCase_ : Tuple = 2_56 UpperCAmelCase_ : Any = 2_24 UpperCAmelCase_ : str = EfficientFormerImageProcessor( size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,) UpperCAmelCase_ : List[str] = processor(images=A__ ,return_tensors="pt" ).pixel_values # original processing pipeline UpperCAmelCase_ : List[Any] = Compose( [ Resize(A__ ,interpolation=pillow_resamplings["bicubic"] ), CenterCrop(A__ ), ToTensor(), Normalize(A__ ,A__ ), ] ) UpperCAmelCase_ : str = image_transforms(A__ ).unsqueeze(0 ) assert torch.allclose(A__ ,A__ ) UpperCAmelCase_ : Optional[Any] = model(A__ ) UpperCAmelCase_ : int = outputs.logits UpperCAmelCase_ : Tuple = (1, 10_00) if "l1" in model_name: UpperCAmelCase_ : Dict = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] ,A__ ,atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: UpperCAmelCase_ : Optional[Any] = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] ,A__ ,atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: UpperCAmelCase_ : int = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(A__ ) print(F"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print("Pushing model to the hub..." 
) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" ,commit_message="Add model" ,use_temp_dir=A__ ,) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" ,commit_message="Add image processor" ,use_temp_dir=A__ ,) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) lowerCamelCase_ = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
95
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any: pass @is_pipeline_test @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @require_torch def snake_case_ ( self : Tuple ) -> Tuple: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @require_tf def snake_case_ ( self : int ) -> Optional[int]: _A = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': 
ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )}, ], ] , ) @slow @require_torch def snake_case_ ( self : Optional[int] ) -> int: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def snake_case_ ( self : Optional[int] ) -> Dict: _A = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
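A stand-alone sketch of the pipeline these tests exercise; the model id, fixture path, and candidate labels are all taken from the slow test above.

from PIL import Image
from transformers import pipeline

image_classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Per the slow test, "remote" and "cat" dominate while "plane" scores near zero.
print(image_classifier(image, candidate_labels=["cat", "plane", "remote"]))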
2
0
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __lowerCamelCase = 'pt' elif is_tf_available(): __lowerCamelCase = 'tf' else: __lowerCamelCase = 'jax' class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ): UpperCAmelCase__ = ByTaTokenizer UpperCAmelCase__ = False def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]: super().setUp() __magic_name__: Union[str, Any] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase__ ( self : str ) -> str: return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def lowerCamelCase__ ( self : str , **__snake_case : List[str] ) -> ByTaTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case ) def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : List[str]=False , __snake_case : str=2_0 , __snake_case : Dict=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __magic_name__: List[Any] = [] for i in range(len(__snake_case ) ): try: __magic_name__: Any = tokenizer.decode([i] , clean_up_tokenization_spaces=__snake_case ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__: List[Any] = list(filter(lambda __snake_case : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __snake_case ) ) __magic_name__: Union[str, Any] = list(filter(lambda __snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__snake_case ) , __snake_case ) ) if max_length is not None and len(__snake_case ) > max_length: __magic_name__: Dict = toks[:max_length] if min_length is not None and len(__snake_case ) < min_length and len(__snake_case ) > 0: while len(__snake_case ) < min_length: __magic_name__: int = toks + toks # toks_str = [t[1] for t in toks] __magic_name__: str = [t[0] for t in toks] # Ensure consistency __magic_name__: int = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case ) if " " not in output_txt and len(__snake_case ) > 1: __magic_name__: List[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__snake_case ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__snake_case ) ) if with_prefix_space: __magic_name__: Any = """ """ + output_txt __magic_name__: List[Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) return output_txt, output_ids def lowerCamelCase__ ( self : Any ) -> Optional[Any]: __magic_name__: int = self.ta_base_tokenizer __magic_name__: List[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) __magic_name__: List[Any] = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] ) def lowerCamelCase__ ( self : List[Any] ) -> List[Any]: __magic_name__: Any = self.ta_base_tokenizer __magic_name__: str = """Unicode €.""" __magic_name__: Any = tokenizer(__snake_case ) __magic_name__: Any = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 
1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1] self.assertEqual(encoded["""input_ids"""] , __snake_case ) # decoding __magic_name__: Optional[Any] = tokenizer.decode(__snake_case ) self.assertEqual(__snake_case , """Unicode €.</s>""" ) __magic_name__: str = tokenizer("""e è é ê ë""" ) __magic_name__: List[str] = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1] self.assertEqual(encoded["""input_ids"""] , __snake_case ) # decoding __magic_name__: Optional[int] = tokenizer.decode(__snake_case ) self.assertEqual(__snake_case , """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" ) def lowerCamelCase__ ( self : str ) -> List[Any]: __magic_name__: Optional[int] = self.ta_base_tokenizer __magic_name__: Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off __magic_name__: Any = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0] # fmt: on __magic_name__: Dict = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) if FRAMEWORK != "jax": __magic_name__: Optional[Any] = list(batch.input_ids.numpy()[0] ) else: __magic_name__: List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__snake_case , __snake_case ) self.assertEqual((2, 3_7) , batch.input_ids.shape ) self.assertEqual((2, 3_7) , batch.attention_mask.shape ) def lowerCamelCase__ ( self : List[str] ) -> int: __magic_name__: Tuple = self.ta_base_tokenizer __magic_name__: Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __magic_name__: Tuple = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , __snake_case ) self.assertIn("""attention_mask""" , __snake_case ) self.assertNotIn("""decoder_input_ids""" , __snake_case ) self.assertNotIn("""decoder_attention_mask""" , __snake_case ) def lowerCamelCase__ ( self : List[str] ) -> List[Any]: __magic_name__: Optional[int] = self.ta_base_tokenizer __magic_name__: Dict = [ """Summary of the text.""", """Another summary.""", ] __magic_name__: List[str] = tokenizer( text_target=__snake_case , max_length=3_2 , padding="""max_length""" , truncation=__snake_case , return_tensors=__snake_case ) self.assertEqual(3_2 , targets["""input_ids"""].shape[1] ) def lowerCamelCase__ ( self : List[str] ) -> int: __magic_name__: str = self.ta_base_tokenizer __magic_name__: Union[str, Any] = ["""A long paragraph for summarization. </s>"""] __magic_name__: Any = ["""Summary of the text. 
</s>"""] # fmt: off __magic_name__: int = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1] __magic_name__: List[str] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1] # fmt: on __magic_name__: Optional[int] = tokenizer(__snake_case , text_target=__snake_case ) self.assertEqual(__snake_case , batch["""input_ids"""][0] ) self.assertEqual(__snake_case , batch["""labels"""][0] ) def lowerCamelCase__ ( self : Dict ) -> str: # safety check on max_len default value so we are sure the test works __magic_name__: int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test __magic_name__: Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__: Optional[Any] = tempfile.mkdtemp() __magic_name__: str = """ He is very happy, UNwant\u00E9d,running""" __magic_name__: Dict = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) tokenizer.save_pretrained(__snake_case ) __magic_name__: int = tokenizer.__class__.from_pretrained(__snake_case ) __magic_name__: Any = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) shutil.rmtree(__snake_case ) __magic_name__: Optional[int] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__: Optional[int] = tempfile.mkdtemp() __magic_name__: str = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) __magic_name__: List[str] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __magic_name__: List[Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) tokenizer.save_pretrained(__snake_case ) __magic_name__: List[str] = tokenizer.__class__.from_pretrained(__snake_case ) __magic_name__: Any = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) __magic_name__: Optional[int] = tokenizer.__class__.from_pretrained(__snake_case , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(__snake_case ) def lowerCamelCase__ ( self : str ) -> int: __magic_name__: Optional[int] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__snake_case ) with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as 
json_file: __magic_name__: Any = json.load(__snake_case ) with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __magic_name__: Any = json.load(__snake_case ) __magic_name__: Union[str, Any] = [F'<extra_id_{i}>' for i in range(1_2_5 )] __magic_name__: List[str] = added_tokens_extra_ids + [ """an_additional_special_token""" ] __magic_name__: List[Any] = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__snake_case , __snake_case ) with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__snake_case , __snake_case ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__: Dict = tokenizer_class.from_pretrained( __snake_case , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__: Optional[int] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__snake_case )] __magic_name__: List[Any] = tokenizer_class.from_pretrained( __snake_case , additional_special_tokens=__snake_case , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]: __magic_name__: List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__snake_case ) __magic_name__: Dict = tokenizer_class.from_pretrained(__snake_case ) self.assertTrue(tokenizer.decode([2_5_5] ) == """""" ) def lowerCamelCase__ ( self : str ) -> str: pass def lowerCamelCase__ ( self : str ) -> Tuple: pass def lowerCamelCase__ ( self : Dict ) -> Optional[Any]: pass def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]: pass def lowerCamelCase__ ( self : List[Any] ) -> List[str]: # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __magic_name__: Tuple = self.get_tokenizers(fast=__snake_case , do_lower_case=__snake_case ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__: Any = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] __magic_name__: Optional[Any] = 
tokenizer.convert_tokens_to_string(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowerCamelCase__ ( self : List[Any] ) -> Any: __magic_name__: Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __magic_name__: str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __magic_name__: List[Any] = 0 __magic_name__: Optional[int] = tokenizer.convert_ids_to_tokens( __snake_case , skip_special_tokens=__snake_case ) for attr in attributes_list: setattr(__snake_case , attr + """_id""" , __snake_case ) self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case ) self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case ) setattr(__snake_case , attr + """_id""" , __snake_case ) self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case ) self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case ) setattr(__snake_case , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [] ) setattr(__snake_case , """additional_special_tokens_ids""" , [token_id_to_test_setters] ) self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [token_to_test_setters] ) self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
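A minimal sketch of the byte-level round trip these tests exercise; the hub id google/byt5-small comes from the tests above.

from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
# Input IDs are raw UTF-8 bytes offset by the special tokens, plus </s> (id 1),
# which is why decoding appends "</s>" in the assertions above.
encoded = tokenizer("Unicode €.").input_ids
print(tokenizer.decode(encoded))  # "Unicode €.</s>"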
96
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def snake_case_ ( self : Tuple ) -> Optional[int]: _A = tempfile.mkdtemp() _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _A = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _A = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]: return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def snake_case_ ( self : Optional[Any] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : int ) -> Optional[Any]: _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case_ ( self : Dict ) -> List[str]: _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = self.get_image_processor() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) _A = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def snake_case_ ( 
self : List[Any] ) -> List[str]: _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _A = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[Any]: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(__lowerCAmelCase , return_tensors='''np''' ) _A = processor(images=__lowerCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = processor(text=__lowerCAmelCase ) _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case_ ( self : List[str] ) -> Any: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def snake_case_ ( self : Optional[Any] ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(__lowerCAmelCase ) _A = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> str: _A = self.get_image_processor() _A = self.get_tokenizer() _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _A = '''lower newer''' _A = self.prepare_image_inputs() _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
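A sketch of using the processor stand-alone instead of from local fixtures; the hub id here is an assumption, not part of the tests above.

import numpy as np
from PIL import Image
from transformers import AlignProcessor

# "kakaobrain/align-base" is an assumed hub id; the tests above build the
# processor from a local tokenizer and image-processor instead.
processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(list(inputs.keys()))  # ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]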
2
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowercase__( UpperCAmelCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> Optional[int]: lowercase_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''depth_multiplier''' ) ) class lowercase__: """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_2 , SCREAMING_SNAKE_CASE_ : Any=0.25 , SCREAMING_SNAKE_CASE_ : Dict=8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : List[str]=6 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : str="relu6" , SCREAMING_SNAKE_CASE_ : int=1_2_8_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Tuple=None , ) -> Union[str, Any]: lowercase_ = parent lowercase_ = batch_size lowercase_ = num_channels lowercase_ = image_size lowercase_ = depth_multiplier lowercase_ = depth_divisible_by lowercase_ = min_depth lowercase_ = expand_ratio lowercase_ = tf_padding lowercase_ = output_stride lowercase_ = first_layer_is_expansion lowercase_ = finegrained_output lowercase_ = hidden_act lowercase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) lowercase_ = classifier_dropout_prob lowercase_ = use_labels lowercase_ = is_training lowercase_ = num_labels lowercase_ = initializer_range lowercase_ = scope def _lowercase ( self : int ) -> str: lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.num_labels ) lowercase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase_ = self.get_config() return config, pixel_values, labels, pixel_labels def _lowercase ( self : int ) -> Tuple: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , 
initializer_range=self.initializer_range , ) def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: lowercase_ = MobileNetVaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowercase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]: lowercase_ = self.num_labels lowercase_ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict: lowercase_ = self.num_labels lowercase_ = MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowercase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowercase ( self : Optional[Any] ) -> Dict: lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :List[str] = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) a :Optional[Any] = ( { 'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification, 'image-segmentation': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) a :Optional[Any] = False a :List[str] = False a :List[Any] = False a :Any = False def _lowercase ( self : int ) -> str: lowercase_ = MobileNetVaModelTester(self ) lowercase_ = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def _lowercase ( self : str ) -> str: pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def _lowercase ( self : List[Any] ) -> Any: pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def _lowercase ( self : str ) -> Optional[Any]: pass def _lowercase ( self : Tuple ) -> int: lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> List[Any]: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ): lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.hidden_states lowercase_ = 1_6 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> List[Any]: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ) -> Optional[Any]: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) @slow def _lowercase ( self : Tuple ) -> Optional[int]: for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def a ( ): '''simple docstring''' lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Union[str, Any] ) -> List[Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def _lowercase ( self : Dict ) -> List[str]: lowercase_ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.default_image_processor lowercase_ = prepare_img() lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits lowercase_ = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) @slow def _lowercase ( self : Optional[int] ) -> Union[str, Any]: lowercase_ = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) lowercase_ = model.to(SCREAMING_SNAKE_CASE_ ) lowercase_ = 
MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) lowercase_ = prepare_img() lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) lowercase_ = outputs.logits # verify the logits lowercase_ = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor( [ [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]], [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]], [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]], ] , device=SCREAMING_SNAKE_CASE_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
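A minimal inference sketch mirroring the slow classification test above; the un-obfuscated class names are an assumption based on the MobileNet v2 archive list the file imports.

import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
image_processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001), as asserted in the test
print(model.config.id2label[int(logits.argmax(-1))])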
97
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
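An illustrative sketch (not part of the original module): because of the attribute_map above, the framework-standard config names resolve to the GPT-specific fields.

config = OpenAIGPTConfig(n_layer=6)
print(config.num_hidden_layers)  # 6   -- "num_hidden_layers" is mapped to "n_layer"
print(config.hidden_size)        # 768 -- "hidden_size" is mapped to "n_embd"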
2
0
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
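A small illustrative sketch (not in the original file) showing how the HEURISTIC flag changes a node's cost; it assumes the definitions above are importable as-is.

# With HEURISTIC == 0 (euclidean), a node at (0, 0) aiming for goal_x=3, goal_y=4
# gets h_cost == sqrt(3**2 + 4**2) == 5.0; with HEURISTIC == 1 (manhattan) it would be 7.
node = Node(pos_x=0, pos_y=0, goal_x=3, goal_y=4, g_cost=0, parent=None)
print(node.h_cost)  # 5.0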
98
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def snake_case_ ( self : Optional[int] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict: if not batched: _A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0] _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( _A , unittest.TestCase): """simple docstring""" a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[int] ) -> Any: _A = DeformableDetrImageProcessingTester(self ) @property def snake_case_ ( self : Union[str, Any] ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[int] ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) ) def snake_case_ ( self : List[str] ) 
-> int: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __lowerCAmelCase ) def snake_case_ ( self : Any ) -> Union[str, Any]: pass def snake_case_ ( self : List[str] ) -> Optional[int]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Tuple ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self : Optional[Any] ) -> int: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape 
, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case_ ( self : Optional[Any] ) -> Optional[int]: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) ) @slow def snake_case_ ( self : List[str] ) -> List[str]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
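A stand-alone sketch of running the image processor the way the slow test above does; the fixture path and expected shape come from that test.

from PIL import Image
from transformers import DeformableDetrImageProcessor

image_processing = DeformableDetrImageProcessor()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = image_processing(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) per the slow test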
2
0
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
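A quick illustrative check (not from the original file): each letter contributes its alphabet index times a power of 26, least-significant letter first.

assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28   # 1 * 26**1 + 2 * 26**0
assert excel_title_to_column("ZZ") == 702  # 26 * 26**1 + 26 * 26**0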
99
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
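An illustrative check of the three-way partition (not from the original file): one pass moves every 0 left of the 1s and every 2 to the right.

assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []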
2
0
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
100
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
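Illustrative checks (not from the original file); the default argument solves Project Euler problem 7, the 10001st prime.

assert solution(6) == 13          # primes: 2, 3, 5, 7, 11, 13
assert is_prime(10_001) is False  # 10001 == 73 * 137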
2
0