Dataset schema:

  code                     string   lengths 82 to 53.2k
  code_codestyle           int64    0 to 721
  style_context            string   lengths 91 to 41.9k
  style_context_codestyle  int64    0 to 699
  label                    int64    0 to 1
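Each row below pairs a `code` sample with a `style_context` sample, their integer style ids, and a binary `label`. As a minimal usage sketch only: assuming the table is published as a Hugging Face dataset, rows with this schema could be loaded and inspected as follows. The dataset id is a placeholder, not the real identifier.

# Minimal sketch, not the canonical loader. The dataset id below is
# hypothetical: substitute the actual repository name.
from datasets import load_dataset

ds = load_dataset("your-org/code-style-pairs", split="train")  # hypothetical id

for row in ds.select(range(3)):
    # Each row holds two flattened Python files plus integer style ids
    # and a 0/1 label, matching the schema above.
    print(len(row["code"]), row["code_codestyle"])
    print(len(row["style_context"]), row["style_context_codestyle"])
    print("label:", row["label"])

Row 1
code: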
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase = logging.get_logger(__name__) class _A ( __lowercase ): lowercase__: List[Any] = ['''pixel_values'''] def __init__( self : Any , __magic_name__ : bool = True , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : Any , ) -> None: """simple docstring""" super().__init__(**__magic_name__ ) __snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 2_56} __snake_case : Dict = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) __snake_case : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} __snake_case : Any = get_size_dict(__magic_name__ ) __snake_case : Optional[Any] = do_resize __snake_case : Dict = size __snake_case : Any = resample __snake_case : str = do_center_crop __snake_case : Optional[int] = crop_size __snake_case : int = do_rescale __snake_case : Optional[Any] = rescale_factor __snake_case : Any = do_normalize __snake_case : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray: """simple docstring""" __snake_case : Optional[Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) __snake_case : Union[str, Any] = get_resize_output_image_size(__magic_name__ , size=size["""shortest_edge"""] , default_to_square=__magic_name__ ) return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ) -> np.ndarray: """simple docstring""" __snake_case : int = get_size_dict(__magic_name__ ) return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Any ) -> np.ndarray: """simple docstring""" return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray: """simple docstring""" return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : int , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : List[Any] , ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize __snake_case : Optional[int] = size if size is not None else self.size __snake_case : Any = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) __snake_case : List[Any] = resample if resample is not None else self.resample __snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : str = crop_size if crop_size is not None else self.crop_size __snake_case : str = get_size_dict(__magic_name__ ) __snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Tuple = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[Any] = image_mean if image_mean is not None else self.image_mean __snake_case : Tuple = image_std if image_std is not None else self.image_std __snake_case : int = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __snake_case : str = [to_numpy_array(__magic_name__ ) for image in images] if do_resize: __snake_case : int = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images] if do_center_crop: __snake_case : Any = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images] if do_rescale: __snake_case : List[Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images] if do_normalize: __snake_case : Tuple = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images] __snake_case : Dict = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] __snake_case : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
code_codestyle: 26

style_context:
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case , __snake_case : Dict = emb.weight.shape __snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase ) __snake_case : Union[str, Any] = emb.weight.data return lin_layer def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = {} for old_key in state_dict.keys(): __snake_case : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: __snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: __snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: __snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: __snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: __snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." 
in key: __snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: __snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: __snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) __snake_case : str = state_dict[old_key] return new_dict def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict: """simple docstring""" __snake_case : Optional[int] = [] __snake_case : Dict = 0 os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) for expert in range(_lowerCamelCase ): __snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowerCamelCase ): __snake_case : Dict = torch.load(_lowerCamelCase )["""model"""] remove_ignore_keys_(_lowerCamelCase ) __snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase ) __snake_case : List[Any] = os.path.join( _lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) ) torch.save(_lowerCamelCase , _lowerCamelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowerCamelCase )[0]].dtype ) # Add the last block __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) ) __snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowerCamelCase ) __snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase ) __snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_lowerCamelCase ) == 1: __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase ) torch.save(_lowerCamelCase , _lowerCamelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowerCamelCase , _lowerCamelCase ) # Otherwise, let's build the index __snake_case : Tuple = {} for idx, shard in enumerate(_lowerCamelCase ): __snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' ) __snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) ) for key in shard: __snake_case : str = shard_file # Add the metadata __snake_case : Optional[Any] = {"""total_size""": total_size} __snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f: __snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n""" f.write(_lowerCamelCase ) return metadata, index if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--nllb_moe_checkpoint_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000", type=str, required=False, help="Path to a directory containing a folder per layer. 
Follows the original Google format.", ) parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCamelCase = parser.parse_args() __UpperCamelCase , __UpperCamelCase = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) __UpperCamelCase = NllbMoeConfig.from_pretrained( "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) __UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("Done") model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 26
label: 1

Row 2
code:
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """Bezier curve: a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
code_codestyle: 715

style_context:
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
style_context_codestyle: 656
label: 0

Row 3
code:
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Dict = { """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "conditional_detr" __UpperCamelCase = ["past_key_values"] __UpperCamelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , _a=True , _a=None , _a=3 , _a=300 , _a=6 , _a=2_048 , _a=8 , _a=6 , _a=2_048 , _a=8 , _a=0.0 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=1.0 , _a=False , _a="sine" , _a="resnet50" , _a=True , _a=False , _a=2 , _a=5 , _a=2 , _a=1 , _a=1 , _a=2 , _a=5 , _a=2 , _a=0.25 , **_a , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(_a , _a ): lowerCamelCase = backbone_config.get("""model_type""" ) lowerCamelCase = CONFIG_MAPPING[backbone_model_type] lowerCamelCase = config_class.from_dict(_a ) lowerCamelCase = use_timm_backbone lowerCamelCase = backbone_config lowerCamelCase = num_channels lowerCamelCase = num_queries lowerCamelCase = d_model lowerCamelCase = encoder_ffn_dim lowerCamelCase = encoder_layers lowerCamelCase = encoder_attention_heads lowerCamelCase = decoder_ffn_dim lowerCamelCase = decoder_layers lowerCamelCase = decoder_attention_heads lowerCamelCase = dropout lowerCamelCase = attention_dropout lowerCamelCase = activation_dropout lowerCamelCase = activation_function lowerCamelCase = init_std lowerCamelCase = init_xavier_std lowerCamelCase = encoder_layerdrop lowerCamelCase = decoder_layerdrop lowerCamelCase = encoder_layers lowerCamelCase = auxiliary_loss lowerCamelCase = position_embedding_type lowerCamelCase = backbone lowerCamelCase = use_pretrained_backbone lowerCamelCase = dilation # Hungarian matcher lowerCamelCase = class_cost lowerCamelCase = bbox_cost lowerCamelCase = giou_cost # Loss coefficients lowerCamelCase = mask_loss_coefficient lowerCamelCase = dice_loss_coefficient lowerCamelCase = cls_loss_coefficient lowerCamelCase = bbox_loss_coefficient lowerCamelCase = giou_loss_coefficient lowerCamelCase = focal_alpha super().__init__(is_encoder_decoder=_a , **_a ) @property def _lowerCAmelCase ( self ): """simple docstring""" return self.encoder_attention_heads @property def _lowerCAmelCase ( self ): """simple docstring""" return self.d_model def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCamelCase = self.backbone_config.to_dict() lowerCamelCase = self.__class__.model_type return output class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = version.parse("1.11" ) @property def _lowerCAmelCase ( self ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: 
"""num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _lowerCAmelCase ( self ): """simple docstring""" return 1e-5 @property def _lowerCAmelCase ( self ): """simple docstring""" return 12
code_codestyle: 543

style_context:
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : int = """▁""" lowerCAmelCase : str = {"""vocab_file""": """prophetnet.tokenizer"""} lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } lowerCAmelCase : Any = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } lowerCAmelCase : List[Any] = { """microsoft/xprophetnet-large-wiki100-cased""": 512, } def a__ ( snake_case__ ) -> int: lowerCamelCase = collections.OrderedDict() with open(snake_case__ , """r""" , encoding="""utf-8""" ) as reader: lowerCamelCase = reader.readlines() for index, token in enumerate(snake_case__ ): lowerCamelCase = token.rstrip("""\n""" ) lowerCamelCase = index return vocab class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self , _a , _a="[SEP]" , _a="[SEP]" , _a="[SEP]" , _a="[UNK]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ): """simple docstring""" lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a , eos_token=_a , sep_token=_a , unk_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_a ) ) lowerCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowerCamelCase = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(10 ): lowerCamelCase = f'[unused{i}]' lowerCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowerCamelCase = 12 lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_a ) def __getstate__( self ): """simple docstring""" lowerCamelCase = self.__dict__.copy() lowerCamelCase = None return state def __setstate__( self , _a ): """simple docstring""" lowerCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCamelCase = {} lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , _a , _a = None , _a = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return ([0] * len(_a )) + [1] return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" lowerCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ): """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , _a ): """simple docstring""" return self.sp_model.encode(_a , out_type=_a ) def _lowerCAmelCase ( self , _a ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase = self.sp_model.PieceToId(_a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCAmelCase ( self , _a ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = """""".join(_a ).replace(_a , """ """ ).strip() return out_string def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , """wb""" ) as fi: lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def _lowerCAmelCase ( self , _a , _a = None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowerCamelCase = 
[self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
style_context_codestyle: 543
label: 1

Row 4
code:
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3

    print("DFS:")
    g.dfs()
    # DFS:
    # 0 1 2 3
code_codestyle: 715

style_context:
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
style_context_codestyle: 17
label: 0

Row 5
code:
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCAmelCase : List[Any] = 'pixel_values' __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : List[Any] = TimmBackboneConfig def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , """timm""") super().__init__(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""") if config.backbone not in timm.list_models(): raise ValueError(f'backbone {config.backbone} is not supported by timm.') if hasattr(SCREAMING_SNAKE_CASE_ , """out_features""") and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""") lowercase__ : Any = getattr(SCREAMING_SNAKE_CASE_ , """use_pretrained_backbone""" , SCREAMING_SNAKE_CASE_) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""") # We just take the final layer by default. This matches the default for the transformers models. lowercase__ : Optional[int] = config.out_indices if getattr(SCREAMING_SNAKE_CASE_ , """out_indices""" , SCREAMING_SNAKE_CASE_) is not None else (-1,) lowercase__ : List[str] = timm.create_model( config.backbone , pretrained=SCREAMING_SNAKE_CASE_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
lowercase__ : List[str] = self._backbone.return_layers lowercase__ : Union[str, Any] = {layer["""module"""]: str(SCREAMING_SNAKE_CASE_) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(SCREAMING_SNAKE_CASE_) @classmethod def lowercase__ ( cls , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""vision""", """timm"""]) from ...models.timm_backbone import TimmBackboneConfig lowercase__ : Dict = kwargs.pop("""config""" , TimmBackboneConfig()) lowercase__ : List[str] = kwargs.pop("""use_timm_backbone""" , SCREAMING_SNAKE_CASE_) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""") lowercase__ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels) lowercase__ : Optional[int] = kwargs.pop("""features_only""" , config.features_only) lowercase__ : int = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone) lowercase__ : Any = kwargs.pop("""out_indices""" , config.out_indices) lowercase__ : List[str] = TimmBackboneConfig( backbone=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , features_only=SCREAMING_SNAKE_CASE_ , use_pretrained_backbone=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , ) return super()._from_config(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' pass def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""") if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowercase__ : List[str] = self._all_layers lowercase__ : Optional[int] = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self._return_layers lowercase__ : Optional[Any] = tuple(hidden_states[i] for i in self.out_indices) else: lowercase__ : int = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = None lowercase__ : Optional[Any] = tuple(SCREAMING_SNAKE_CASE_) lowercase__ : int = tuple(SCREAMING_SNAKE_CASE_) if hidden_states is not None else None if not return_dict: lowercase__ : Union[str, Any] = (feature_maps,) if output_hidden_states: lowercase__ : Optional[Any] = output + (hidden_states,) return output return BackboneOutput(feature_maps=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , attentions=SCREAMING_SNAKE_CASE_)
code_codestyle: 12

style_context:
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
style_context_codestyle: 125
label: 0

Row 6
code:
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Transformation applied to every pixel value.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
code_codestyle: 57

style_context:
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
style_context_codestyle: 57
label: 1

Row 7
code:
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 205

style_context:
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowercase ( snake_case_ ): lowercase = ['image_processor', 'tokenizer'] lowercase = 'BlipImageProcessor' lowercase = 'AutoTokenizer' def __init__( self : Optional[int] , snake_case : Tuple , snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : Tuple = False super().__init__(snake_case , snake_case ) UpperCamelCase_ : Optional[Any] = self.image_processor def __call__( self : str , snake_case : ImageInput = None , snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case : bool = True , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Union[bool, str, TruncationStrategy] = None , snake_case : Optional[int] = None , snake_case : int = 0 , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = True , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : str , ) -> BatchEncoding: """simple docstring""" if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: UpperCamelCase_ : Union[str, Any] = self.tokenizer UpperCamelCase_ : List[Any] = self.tokenizer( text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , ) return text_encoding # add pixel_values UpperCamelCase_ : Union[str, Any] = self.image_processor(snake_case , return_tensors=snake_case ) if text is not None: UpperCamelCase_ : Optional[Any] = self.tokenizer( text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , ) else: UpperCamelCase_ : Any = None if text_encoding is not None: encoding_image_processor.update(snake_case ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case : Union[str, Any] , **snake_case : Any ) -> str: """simple docstring""" return self.tokenizer.batch_decode(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case : str , **snake_case : Tuple ) -> List[str]: """simple docstring""" return self.tokenizer.decode(*snake_case , **snake_case ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCamelCase_ : Any = self.tokenizer.model_input_names UpperCamelCase_ : Optional[Any] = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
style_context_codestyle: 417
label: 0

Row 8
code:
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase__ ( self ) -> Optional[Any]: A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase_ ,"""hidden_sizes""" ) ) self.parent.assertTrue(hasattr(lowerCamelCase_ ,"""num_attention_heads""" ) ) self.parent.assertTrue(hasattr(lowerCamelCase_ ,"""num_encoder_blocks""" ) ) class lowerCamelCase__ : '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=1_3 ,lowerCamelCase_=6_4 ,lowerCamelCase_=3 ,lowerCamelCase_=4 ,lowerCamelCase_=[2, 2, 2, 2] ,lowerCamelCase_=[8, 4, 2, 1] ,lowerCamelCase_=[1_6, 3_2, 6_4, 1_2_8] ,lowerCamelCase_=[1, 4, 8, 1_6] ,lowerCamelCase_=[1, 2, 4, 8] ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.02 ,lowerCamelCase_=3 ,lowerCamelCase_=None ,) -> Optional[Any]: A = parent A = batch_size A = image_size A = num_channels A = num_encoder_blocks A = sr_ratios A = depths A = hidden_sizes A = downsampling_rates A = num_attention_heads A = is_training A = use_labels A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = num_labels A = scope def UpperCamelCase__ ( self ) -> List[Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) A = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ) -> str: return SegformerConfig( image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[Any]: A = SegformerModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A = model(lowerCamelCase_ ) A = A = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> int: A = self.num_labels A = SegformerForSemanticSegmentation(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) 
) A = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss ,0.0 ) def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[int]: A = 1 A = SegformerForSemanticSegmentation(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase_ ) A = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertGreater(result.loss ,0.0 ) def UpperCamelCase__ ( self ) -> str: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) _lowerCamelCase = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def UpperCamelCase__ ( self ) -> str: A = SegformerModelTester(self ) A = SegformerConfigTester(self ,config_class=lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> Any: self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase_ ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def UpperCamelCase__ ( self ) -> Optional[int]: pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def UpperCamelCase__ ( self ) -> Dict: pass def UpperCamelCase__ ( self ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(lowerCamelCase_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> int: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = True for model_class in self.all_model_classes: A = True A = False A = True A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) A = outputs.attentions A = sum(self.model_tester.depths ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A = True A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) A = outputs.attentions 
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # verify the first attentions (first block, first layer) A = (self.model_tester.image_size // 4) ** 2 A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,) # verify the last attentions (last block, last layer) A = (self.model_tester.image_size // 3_2) ** 2 A = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,) A = len(lowerCamelCase_ ) # Check attention is always last and order is fine A = True A = True A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertEqual(out_len + 1 ,len(lowerCamelCase_ ) ) A = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # verify the first attentions (first block, first layer) A = (self.model_tester.image_size // 4) ** 2 A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,) def UpperCamelCase__ ( self ) -> int: def check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ): A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) A = outputs.hidden_states A = self.model_tester.num_encoder_blocks self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = True check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> Optional[Any]: if not self.model_tester.is_training: return A , A = self.model_tester.prepare_config_and_inputs_for_common() A = True for model_class in self.all_model_classes: if model_class in get_values(lowerCamelCase_ ): continue A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() A = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) A = model(**lowerCamelCase_ ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase__ ( self ) -> Optional[int]: pass @slow def UpperCamelCase__ ( self ) -> Optional[Any]: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = SegformerModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def _A ( ): """simple docstring""" A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ ( self ) 
-> int: # only resize + normalize A = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) ,keep_ratio=lowerCamelCase_ ,align=lowerCamelCase_ ,do_random_crop=lowerCamelCase_ ) A = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( lowerCamelCase_ ) A = prepare_img() A = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ) A = encoded_inputs.pixel_values.to(lowerCamelCase_ ) with torch.no_grad(): A = model(lowerCamelCase_ ) A = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) A = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase_ ,atol=1E-4 ) ) @slow def UpperCamelCase__ ( self ) -> Optional[int]: # only resize + normalize A = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) ,keep_ratio=lowerCamelCase_ ,align=lowerCamelCase_ ,do_random_crop=lowerCamelCase_ ) A = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(lowerCamelCase_ ) A = prepare_img() A = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ) A = encoded_inputs.pixel_values.to(lowerCamelCase_ ) with torch.no_grad(): A = model(lowerCamelCase_ ) A = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) A = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase_ ,atol=1E-1 ) ) @slow def UpperCamelCase__ ( self ) -> Union[str, Any]: # only resize + normalize A = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) ,keep_ratio=lowerCamelCase_ ,align=lowerCamelCase_ ,do_random_crop=lowerCamelCase_ ) A = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( lowerCamelCase_ ) A = prepare_img() A = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ) A = encoded_inputs.pixel_values.to(lowerCamelCase_ ) with torch.no_grad(): A = model(lowerCamelCase_ ) A = outputs.logits.detach().cpu() A = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ ,target_sizes=[(5_0_0, 3_0_0)] ) A = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape ,lowerCamelCase_ ) A = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ ) A = torch.Size((1_2_8, 1_2_8) ) self.assertEqual(segmentation[0].shape ,lowerCamelCase_ )
716
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase =logging.get_logger(__name__) UpperCAmelCase ="https://openaipublic.azureedge.net/jukebox/models/" UpperCAmelCase ={ "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def _A ( _a : Optional[Any] ): """simple docstring""" if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 1_0: A = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" ) elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 1_0: A = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" ) elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 1_0: A = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" ) elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 1_0: A = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" ) if "conditioner_blocks.0." in key: A = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" ) if "prime_prior" in key: A = key.replace("""prime_prior""" , """encoder""" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: A = key.replace(""".emb.""" , """.""" ) if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(""".k""" , """.codebook""" ) if "y_emb." in key: return key.replace("""y_emb.""" , """metadata_embedding.""" ) if "x_emb.emb." 
in key: A = key.replace("""0.x_emb.emb""" , """embed_tokens""" ) if "prime_state_ln" in key: return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" ) if ".ln" in key: return key.replace(""".ln""" , """.layer_norm""" ) if "_ln" in key: return key.replace("""_ln""" , """_layer_norm""" ) if "prime_state_proj" in key: return key.replace("""prime_state_proj""" , """encoder.proj_in""" ) if "prime_x_out" in key: return key.replace("""prime_x_out""" , """encoder.lm_head""" ) if "prior.x_out" in key: return key.replace("""x_out""" , """fc_proj_out""" ) if "x_emb" in key: return key.replace("""x_emb""" , """embed_tokens""" ) return key def _A ( _a : Union[str, Any] , _a : Union[str, Any] , _a : Union[str, Any] , _a : List[Any] ): """simple docstring""" A = {} import re A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) A = re.compile( r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) A = re.compile( r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" ) A = re.compile( r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(_a ): A = re_encoder_block_conv_in.match(_a ) A = regex_match.groups() A = int(groups[2] ) * 2 + int(groups[3] ) A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' A = re_encoder_block_conv_in.sub(_a , _a ) elif re_encoder_block_resnet.fullmatch(_a ): A = re_encoder_block_resnet.match(_a ) A = regex_match.groups() A = int(groups[2] ) * 2 + int(groups[3] ) A = {"""1""": 1, """3""": 2}[groups[-2]] A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' A = prefix + resnet_block A = re_encoder_block_resnet.sub(_a , _a ) elif re_encoder_block_proj_out.fullmatch(_a ): A = re_encoder_block_proj_out.match(_a ) A = regex_match.groups() A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' A = re_encoder_block_proj_out.sub(_a , _a ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(_a ): A = re_decoder_block_conv_out.match(_a ) A = regex_match.groups() A = int(groups[2] ) * 2 + int(groups[3] ) - 2 A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' A = re_decoder_block_conv_out.sub(_a , _a ) elif re_decoder_block_resnet.fullmatch(_a ): A = re_decoder_block_resnet.match(_a ) A = regex_match.groups() A = int(groups[2] ) * 2 + int(groups[3] ) - 2 A = {"""1""": 1, """3""": 2}[groups[-2]] A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' 
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' A = prefix + resnet_block A = re_decoder_block_resnet.sub(_a , _a ) elif re_decoder_block_proj_in.fullmatch(_a ): A = re_decoder_block_proj_in.match(_a ) A = regex_match.groups() A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' A = re_decoder_block_proj_in.sub(_a , _a ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(_a ): A = re_prior_cond_conv_out.match(_a ) A = regex_match.groups() A = int(groups[1] ) * 2 + int(groups[2] ) - 2 A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' A = re_prior_cond_conv_out.sub(_a , _a ) elif re_prior_cond_resnet.fullmatch(_a ): A = re_prior_cond_resnet.match(_a ) A = regex_match.groups() A = int(groups[1] ) * 2 + int(groups[2] ) - 2 A = {"""1""": 1, """3""": 2}[groups[-2]] A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' A = prefix + resnet_block A = re_prior_cond_resnet.sub(_a , _a ) elif re_prior_cond_proj_in.fullmatch(_a ): A = re_prior_cond_proj_in.match(_a ) A = regex_match.groups() A = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' A = re_prior_cond_proj_in.sub(_a , _a ) # keep original key else: A = original_key A = replace_key(_a ) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: A = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) A = original_key A = original_key A = value return new_dict @torch.no_grad() def _A ( _a : Optional[Any]=None , _a : str=None ): """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): A = requests.get(f'{PREFIX}{file}' , allow_redirects=_a ) os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=_a ) open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content ) A = MODEL_MAPPING[model_name.split("""/""" )[-1]] A = JukeboxConfig.from_pretrained(_a ) A = JukeboxModel(_a ) A = [] A = {} for i, dict_name in enumerate(_a ): A = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""] A = {} for k in old_dic.keys(): if k.endswith(""".b""" ): A = old_dic[k] elif k.endswith(""".w""" ): A = old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: A = old_dic[k] else: A = old_dic[k] A = """vqvae""" if i == 0 else f'priors.{3 - i}' A = fix_jukebox_keys(_a , model.state_dict() , _a , _a ) weight_dict.append(_a ) A = weight_dict.pop(0 ) model.vqvae.load_state_dict(_a ) for i in range(len(_a ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(_a ).mkdir(exist_ok=_a ) with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile: json.dump(_a , _a ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_a ) return weight_dict if __name__ == "__main__": UpperCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jukebox-5b-lyrics", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="jukebox-5b-lyrics-converted", type=str, help="Path to the output PyTorch model directory.", ) UpperCAmelCase =parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
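A hedged invocation sketch for the Jukebox converter above (the script filename is hypothetical; the flags mirror the argparse defaults):

# python convert_jukebox_checkpoint.py \
#     --model_name jukebox-5b-lyrics \
#     --pytorch_dump_folder_path jukebox-5b-lyrics-converted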
255
0
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: _A : Tuple = None _A : int = logging.get_logger(__name__) _A : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} _A : Optional[Any] = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", }, """tokenizer_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""", }, } # TODO(PVP) - this should be removed in Transformers v5 _A : str = { """t5-small""": 5_12, """t5-base""": 5_12, """t5-large""": 5_12, """t5-3b""": 5_12, """t5-11b""": 5_12, } class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ : int = VOCAB_FILES_NAMES lowerCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Union[str, Any] = ["""input_ids""", """attention_mask"""] lowerCamelCase__ : Union[str, Any] = TaTokenizer lowerCamelCase__ : List[int] = [] def __init__( self , A_=None , A_=None , A_="</s>" , A_="<unk>" , A_="<pad>" , A_=1_00 , A_=None , **A_ , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE__ = [f'''<extra_id_{i}>''' for i in range(A_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens SCREAMING_SNAKE_CASE__ = len(set(filter(lambda A_ : bool('''extra_id_''' in str(A_ ) ) , A_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( A_ , tokenizer_file=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , extra_ids=A_ , additional_special_tokens=A_ , **A_ , ) SCREAMING_SNAKE_CASE__ = vocab_file SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True SCREAMING_SNAKE_CASE__ = extra_ids @staticmethod def lowercase_ ( A_ , A_ , A_ ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: SCREAMING_SNAKE_CASE__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , A_ , ) return max_model_length def lowercase_ ( self , A_ , A_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(A_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) logger.info(f'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def lowercase_ ( self , A_ , A_ = None ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: SCREAMING_SNAKE_CASE__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def lowercase_ ( self , A_ , A_ = None ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowercase_ ( self ): '''simple docstring''' return list( set(filter(lambda A_ : bool(re.search(r'''<extra_id_\d+>''' , A_ ) ) is not None , self.additional_special_tokens ) ) ) def lowercase_ ( self ): '''simple docstring''' return [self.convert_tokens_to_ids(A_ ) for token in self.get_sentinel_tokens()]
100
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def _lowerCamelCase( __snake_case , __snake_case ) -> Dict: __snake_case = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def _lowerCamelCase( __snake_case , __snake_case ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __snake_case = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) __snake_case = in_proj_weight[ : encoder_config.hidden_size, : ] __snake_case = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __snake_case = in_proj_weight[ -encoder_config.hidden_size :, : ] def _lowerCamelCase( __snake_case , __snake_case , __snake_case ) -> List[Any]: __snake_case = dct.pop(__snake_case ) __snake_case = val def _lowerCamelCase( __snake_case ) -> str: if "handwritten" in checkpoint_url: __snake_case = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __snake_case = 
"https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" __snake_case = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("RGB" ) return im @torch.no_grad() def _lowerCamelCase( __snake_case , __snake_case ) -> int: __snake_case = ViTConfig(image_size=384 , qkv_bias=__snake_case ) __snake_case = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __snake_case = 768 elif "large" in checkpoint_url: # use ViT-large encoder __snake_case = 1024 __snake_case = 4096 __snake_case = 24 __snake_case = 16 __snake_case = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __snake_case = False __snake_case = "relu" __snake_case = 1024 __snake_case = True __snake_case = False __snake_case = False # load HuggingFace model __snake_case = ViTModel(__snake_case , add_pooling_layer=__snake_case ) __snake_case = TrOCRForCausalLM(__snake_case ) __snake_case = VisionEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) model.eval() # load state_dict of original model, rename some keys __snake_case = torch.hub.load_state_dict_from_url(__snake_case , map_location="cpu" , check_hash=__snake_case )["model"] __snake_case = create_rename_keys(__snake_case , __snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) read_in_q_k_v(__snake_case , __snake_case ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __snake_case = state_dict.pop(__snake_case ) if key.startswith("decoder" ) and "output_projection" not in key: __snake_case = val else: __snake_case = val # load state dict model.load_state_dict(__snake_case ) # Check outputs on an image __snake_case = ViTImageProcessor(size=encoder_config.image_size ) __snake_case = RobertaTokenizer.from_pretrained("roberta-large" ) __snake_case = TrOCRProcessor(__snake_case , __snake_case ) __snake_case = processor(images=prepare_img(__snake_case ) , return_tensors="pt" ).pixel_values # verify logits __snake_case = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __snake_case = model(pixel_values=__snake_case , decoder_input_ids=__snake_case ) __snake_case = outputs.logits __snake_case = torch.Size([1, 1, 5_0265] ) if "trocr-base-handwritten" in checkpoint_url: __snake_case = torch.tensor( [-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] ) elif "trocr-large-handwritten" in checkpoint_url: __snake_case = torch.tensor( [-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] ) elif "trocr-base-printed" in checkpoint_url: __snake_case = torch.tensor( [-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] ) elif "trocr-large-printed" in checkpoint_url: __snake_case = torch.tensor( [-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" 
assert torch.allclose(logits[0, 0, :10] , __snake_case , atol=1e-3 ), "First elements of logits not as expected" Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__snake_case ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__snake_case ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCamelCase__ = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
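A hedged invocation sketch for the TrOCR converter above (the script filename is hypothetical; the checkpoint URL is the parser default):

# python convert_trocr_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#     --pytorch_dump_folder_path ./trocr-base-handwritten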
524
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
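A value the validator above accepts has exactly two keys: a recognized "type" and a float "factor" strictly greater than 1.0.

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
# LlamaConfig(rope_scaling={"name": "linear", "factor": 2.0})          # would raise ValueError: no "type" key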
703
import random


def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
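A usage sketch for the reconstructed selection routine above: the k-th smallest element in expected O(n) time, without fully sorting.

data = [9, 1, 7, 3, 5]
assert quick_select(data, 0) == 1               # minimum
assert quick_select(data, len(data) // 2) == 5  # median of five items
assert quick_select(data, len(data) - 1) == 9   # maximum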
148
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        # the padding value was masked in this row; True is assumed here
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask (reconstructed from the
        # masked `= 0` / `= 1` assignments in the original row)
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
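A hedged usage sketch for the tool above (agent-tool call pattern; the image path is hypothetical, and the checkpoint is fetched during setup):

# tool = ImageSegmentationTool()
# tool.setup()
# mask = tool(image=Image.open("cat.png"), label="cat")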
105
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Tuple = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[str] = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
509
0
from __future__ import annotations def snake_case( __magic_name__ ) -> bool: '''simple docstring''' return len(set(__magic_name__ ) ) == len(__magic_name__ ) if __name__ == "__main__": import doctest doctest.testmod()
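A two-line sanity sketch for the uniqueness predicate above, calling it by its name as defined in this row:

assert snake_case([1, 2, 3]) is True   # all elements distinct
assert snake_case([1, 2, 2]) is False  # the duplicate collapses the set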
596
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Optional[int] = IFImgaImgSuperResolutionPipeline _UpperCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} _UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) _UpperCamelCase : str = PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return self._get_superresolution_dummy_components() def __a ( self : List[str] , _A : Optional[Any] , _A : Union[str, Any]=0 ) -> Optional[int]: """simple docstring""" if str(_A ).startswith('''mps''' ): lowercase : List[Any] = torch.manual_seed(_A ) else: lowercase : Dict = torch.Generator(device=_A ).manual_seed(_A ) lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) lowercase : List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(_A ) ).to(_A ) lowercase : Optional[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __a ( self : Optional[Any] ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __a ( self : Tuple ) -> List[str]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __a ( self : Any ) -> str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def __a ( self : List[Any] ) -> List[str]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __a ( self : int ) -> List[str]: """simple docstring""" self._test_save_load_local() def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
596
1
"""simple docstring"""


def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube, e.g. 27 == 3 ** 3."""
    # Round the floating-point cube root before cubing it back:
    # without rounding, precision drift in n ** (1 / 3) makes the
    # equality fail even for genuine cubes.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
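Concretely, the drift the rounding guards against (typical IEEE-754 doubles; the exact digits may vary by platform):

root = 27 ** (1 / 3)
print(root)             # 3.0000000000000004
print(root ** 3 == 27)  # False -- the raw root cannot be compared directly
print(perfect_cube(27), perfect_cube(4))  # True False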
430
"""simple docstring"""


def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
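Spot checks of the converter above against the stdlib `bin()` reference:

for value in ("0", "12", "-37"):
    assert main(value) == bin(int(value))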
430
1
"""simple docstring"""
from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
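A worked instance of the rotation test above: 197 rotates to 971 and 719, and all three survive `is_prime`, so 197 is counted as circular.

assert all(is_prime(n) for n in (197, 971, 719))
assert 197 in find_circular_primes(1_000)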
710
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class SCREAMING_SNAKE_CASE: def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , lowerCamelCase__=1000 , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope __lowercase = range_bbox def snake_case__ ( self ) -> Tuple: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment __lowercase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowercase = bbox[i, j, 3] __lowercase = bbox[i, j, 1] __lowercase = t if bbox[i, j, 2] < bbox[i, j, 0]: __lowercase = bbox[i, j, 2] __lowercase = bbox[i, j, 0] __lowercase = t __lowercase = tf.convert_to_tensor(lowerCamelCase__ ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: """simple docstring""" __lowercase = TFLayoutLMModel(config=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" __lowercase = TFLayoutLMForMaskedLM(config=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_labels __lowercase = TFLayoutLMForSequenceClassification(config=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: """simple docstring""" __lowercase = self.num_labels __lowercase = TFLayoutLMForTokenClassification(config=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: """simple docstring""" __lowercase = TFLayoutLMForQuestionAnswering(config=lowerCamelCase__ ) __lowercase = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__ ( self ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { """input_ids""": input_ids, """bbox""": bbox, 
"""token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE( __A , __A , unittest.TestCase ): snake_case_ : Union[str, Any] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case_ : Tuple = ( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case_ : Optional[int] = False snake_case_ : Optional[int] = True snake_case_ : Tuple = 10 def snake_case__ ( self ) -> Optional[int]: """simple docstring""" __lowercase = TFLayoutLMModelTester(self ) __lowercase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def snake_case__ ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def snake_case__ ( self ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def snake_case__ ( self ) -> Tuple: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def snake_case__ ( self ) -> Any: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def snake_case__ ( self ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) def snake_case__ ( self ) -> Tuple: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) @slow def snake_case__ ( self ) -> Dict: """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFLayoutLMModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skip("""Onnx compliancy broke with TF 2.10""" ) def snake_case__ ( self ) -> str: """simple docstring""" pass def snake_case_ ( ): """simple docstring""" __lowercase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231 __lowercase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 __lowercase = 
tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231 __lowercase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) __lowercase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class SCREAMING_SNAKE_CASE( unittest.TestCase ): @slow def snake_case__ ( self ) -> Optional[Any]: """simple docstring""" __lowercase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" ) __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = prepare_layoutlm_batch_inputs() # forward pass __lowercase = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) # test the sequence output on [0, :3, :3] __lowercase = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-3 ) ) # test the pooled output on [1, :3] __lowercase = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowerCamelCase__ , atol=1E-3 ) ) @slow def snake_case__ ( self ) -> Union[str, Any]: """simple docstring""" __lowercase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 ) __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = prepare_layoutlm_batch_inputs() # forward pass __lowercase = model( input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar __lowercase = outputs.loss __lowercase = (2,) self.assertEqual(loss.shape , lowerCamelCase__ ) # test the shape of the logits __lowercase = outputs.logits __lowercase = (2, 2) self.assertEqual(logits.shape , lowerCamelCase__ ) @slow def snake_case__ ( self ) -> Optional[Any]: """simple docstring""" __lowercase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 ) __lowercase ,__lowercase ,__lowercase 
,__lowercase ,__lowercase = prepare_layoutlm_batch_inputs() # forward pass __lowercase = model( input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) # test the shape of the logits __lowercase = outputs.logits __lowercase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , lowerCamelCase__ ) @slow def snake_case__ ( self ) -> str: """simple docstring""" __lowercase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" ) __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = prepare_layoutlm_batch_inputs() # forward pass __lowercase = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) # test the shape of the logits __lowercase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , lowerCamelCase__ ) self.assertEqual(outputs.end_logits.shape , lowerCamelCase__ )
163
0
def solution() -> int:
    # a + b + c == 1_000 and a**2 + b**2 == c**2 has exactly one
    # solution with a <= b; iterating b from a avoids duplicate pairs.
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
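The single qualifying triple the search above finds is (200, 375, 425); checking it by hand:

a, b = 200, 375
c = 1_000 - a - b              # 425
assert a * a + b * b == c * c  # 40_000 + 140_625 == 180_625
print(a * b * c)               # 31875000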
37
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ (__lowercase ): def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , "embed_dim" ) ) self.parent.assertTrue(hasattr(_a , "num_heads" ) ) class __magic_name__ : def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.0_2 , _a=1E-12 , _a=True , _a=True , _a=2 , ) -> int: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_sizes lowerCAmelCase_ = patch_stride lowerCAmelCase_ = patch_padding lowerCAmelCase_ = is_training lowerCAmelCase_ = use_labels lowerCAmelCase_ = num_labels lowerCAmelCase_ = num_channels lowerCAmelCase_ = embed_dim lowerCAmelCase_ = num_heads lowerCAmelCase_ = stride_kv lowerCAmelCase_ = depth lowerCAmelCase_ = cls_token lowerCAmelCase_ = attention_drop_rate lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps def __a ( self ) -> Any: lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ = self.get_config() return config, pixel_values, labels def __a ( self ) -> str: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __a ( self , _a , _a , _a ) -> List[Any]: lowerCAmelCase_ = CvtModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a ) lowerCAmelCase_ = (self.image_size, self.image_size) lowerCAmelCase_ , lowerCAmelCase_ = image_size[0], image_size[1] for i in range(len(self.depth ) ): lowerCAmelCase_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) lowerCAmelCase_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __a ( self , _a , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = CvtForImageClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = 
self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs lowerCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self ) -> List[Any]: lowerCAmelCase_ = CvtModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def __a ( self ) -> Any: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self ) -> List[str]: return @unittest.skip(reason="Cvt does not output attentions" ) def __a ( self ) -> Optional[Any]: pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def __a ( self ) -> str: pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def __a ( self ) -> Union[str, Any]: pass def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = model_class(_a ) lowerCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ = [*signature.parameters.keys()] lowerCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _a ) def __a ( self ) -> Tuple: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> List[Any]: def check_hidden_states_output(_a , _a , _a ): lowerCAmelCase_ = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) ) lowerCAmelCase_ = outputs.hidden_states lowerCAmelCase_ = len(self.model_tester.depth ) self.assertEqual(len(_a ) , _a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ = True check_hidden_states_output(_a , _a , _a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __a ( self ) -> Optional[Any]: pass @slow def __a ( self ) -> Any: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = CvtModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def A(): lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ (unittest.TestCase ): @cached_property def __a ( self ) -> str: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __a ( self ) -> List[str]: lowerCAmelCase_ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a ) lowerCAmelCase_ = self.default_image_processor lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) # verify the logits lowerCAmelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCAmelCase_ = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
122
0
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests __lowercase = """https://api.github.com""" # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user __lowercase = BASE_URL + """/user""" # https://github.com/settings/tokens __lowercase = os.environ.get("""USER_TOKEN""", """""") def lowercase ( A_ )-> dict[Any, Any]: '''simple docstring''' a : Optional[Any] = { "Authorization": F'''token {auth_token}''', "Accept": "application/vnd.github.v3+json", } return requests.get(A_ , headers=A_ ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(f'''{key}: {value}''') else: raise ValueError("""'USER_TOKEN' field cannot be empty.""")
700
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowercase = """.""" # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowercase = [ """Assert""", """AssignVariableOp""", """EmptyTensorList""", """MergeV2Checkpoints""", """ReadVariableOp""", """ResourceGather""", """RestoreV2""", """SaveV2""", """ShardedFilename""", """StatefulPartitionedCall""", """StaticRegexFullMatch""", """VarHandleOp""", ] def lowercase ( A_ , A_ , A_ )-> Optional[Any]: '''simple docstring''' a : str = SavedModel() a : List[Any] = [] with open(os.path.join(A_ , "utils" , "tf_ops" , "onnx.json" ) ) as f: a : Optional[int] = json.load(A_ )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(A_ )] ) with open(A_ , "rb" ) as f: saved_model.ParseFromString(f.read() ) a : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want a : Union[str, Any] = sorted(A_ ) a : Dict = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(A_ ) if strict and len(A_ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(A_ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*A_ , sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""") parser.add_argument( """--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested.""" ) parser.add_argument( """--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model.""" ) parser.add_argument( """--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)""" ) __lowercase = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
135
0
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED _lowercase = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } _lowercase = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def UpperCamelCase ( ): lowerCAmelCase_ : Optional[int] = ( list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1)) ) lowerCAmelCase_ : List[Any] = bs[:] lowerCAmelCase_ : Optional[int] = 0 for b in range(2**8): if b not in bs: bs.append(snake_case__) cs.append(2**8 + n) n += 1 lowerCAmelCase_ : Tuple = [chr(snake_case__) for n in cs] return dict(zip(snake_case__ , snake_case__)) def UpperCamelCase ( snake_case__): lowerCAmelCase_ : str = set() lowerCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char)) lowerCAmelCase_ : Union[str, Any] = char return pairs class __snake_case ( snake_case__ ): """simple docstring""" UpperCamelCase_ = VOCAB_FILES_NAMES UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ = ['input_ids', 'attention_mask'] def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any: '''simple docstring''' lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token super().__init__( errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,) with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle: lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase_ : Optional[int] = bytes_to_unicode() lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle: lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1] lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) ) lowerCAmelCase_ : Dict = {} lowerCAmelCase_ : List[str] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def UpperCAmelCase_ ( self : Dict ) -> Dict: '''simple docstring''' return len(self.encoder ) def UpperCAmelCase_ ( self : Dict ) -> str: '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ ) lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ ) if not pairs: return token while True: lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram lowerCAmelCase_ : Tuple = [] lowerCAmelCase_ : str = 0 while i < len(lowerCAmelCase__ ): try: lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase_ : List[str] = j if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = new_word if len(lowerCAmelCase__ ) == 1: break else: lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = word return word def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ : Any = [] for token in re.findall(self.pat ,lowerCAmelCase__ ): lowerCAmelCase_ : Optional[int] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(lowerCAmelCase__ ).split(" " ) ) return bpe_tokens def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' return self.decoder.get(lowerCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any: '''simple docstring''' lowerCAmelCase_ : int = "".join(lowerCAmelCase__ ) lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors ) return text def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase_ : Optional[int] = os.path.join( lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase_ : List[str] = os.path.join( lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" ) lowerCAmelCase_ : Dict = 0 with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" 
) lowerCAmelCase_ : List[Any] = token_index writer.write(" ".join(lowerCAmelCase__ ) + "\n" ) index += 1 return vocab_file, merge_file def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] lowerCAmelCase_ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1] def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' lowerCAmelCase_ : Optional[int] = [self.sep_token_id] lowerCAmelCase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : str ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ : Optional[int] = kwargs.pop("add_prefix_space" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()): lowerCAmelCase_ : List[str] = " " + text return (text, kwargs) def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,) -> dict: '''simple docstring''' lowerCAmelCase_ : int = super()._pad( encoded_inputs=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding_strategy=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,) # Load from model defaults if return_attention_mask is None: lowerCAmelCase_ : List[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase_ : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCAmelCase_ : List[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCAmelCase__ ) if needs_to_be_padded: lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase_ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase_ : List[Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
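
# To make the BPE merge loop in the tokenizer above concrete, here is a minimal,
# self-contained sketch of one merge step on a toy word. The merge table is invented
# for illustration and is not the LED vocabulary.
def get_pairs_demo(word: tuple) -> set:
    # Same idea as get_pairs above: collect every adjacent symbol pair.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

toy_bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # invented merge priorities; lower merges first
toy_word = ("l", "o", "w")
best_pair = min(get_pairs_demo(toy_word), key=lambda p: toy_bpe_ranks.get(p, float("inf")))
print(best_pair)  # ('l', 'o'); after merging, the word becomes ('lo', 'w')
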
659
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _lowercase = '''src/diffusers''' _lowercase = '''.''' # This is to make sure the diffusers module imported is the one in the repo. _lowercase = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) _lowercase = spec.loader.load_module() def UpperCamelCase ( snake_case__ , snake_case__): return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None def UpperCamelCase ( snake_case__): lowerCAmelCase_ : Tuple = object_name.split(".") lowerCAmelCase_ : Union[str, Any] = 0 # First let's find the module where our object lives. lowerCAmelCase_ : Union[str, Any] = parts[i] while i < len(snake_case__) and not os.path.isfile(os.path.join(snake_case__ , F'''{module}.py''')): i += 1 if i < len(snake_case__): lowerCAmelCase_ : Dict = os.path.join(snake_case__ , parts[i]) if i >= len(snake_case__): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''') with open(os.path.join(snake_case__ , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f: lowerCAmelCase_ : Optional[Any] = f.readlines() # Now let's find the class / func in the code! lowerCAmelCase_ : Union[str, Any] = "" lowerCAmelCase_ : int = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case__) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case__): raise ValueError(F''' {object_name} does not match any function or class in {module}.''') # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowerCAmelCase_ : Union[str, Any] = line_index while line_index < len(snake_case__) and _should_continue(lines[line_index] , snake_case__): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1]) <= 1: line_index -= 1 lowerCAmelCase_ : List[str] = lines[start_index:line_index] return "".join(snake_case__) _lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') _lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''') _lowercase = re.compile(r'''<FILL\s+[^>]*>''') def UpperCamelCase ( snake_case__): lowerCAmelCase_ : Any = code.split("\n") lowerCAmelCase_ : Any = 0 while idx < len(snake_case__) and len(lines[idx]) == 0: idx += 1 if idx < len(snake_case__): return re.search(R"^(\s*)\S" , lines[idx]).groups()[0] return "" def UpperCamelCase ( snake_case__): lowerCAmelCase_ : Dict = len(get_indent(snake_case__)) > 0 if has_indent: lowerCAmelCase_ : Dict = F'''class Bla:\n{code}''' lowerCAmelCase_ : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=snake_case__) lowerCAmelCase_ : Optional[Any] = black.format_str(snake_case__ , mode=snake_case__) lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = style_docstrings_in_code(snake_case__) return result[len("class Bla:\n") :] if has_indent else result def UpperCamelCase ( snake_case__ , snake_case__=False): with open(snake_case__ , "r" , encoding="utf-8" , newline="\n") as f: lowerCAmelCase_ : Tuple = f.readlines() lowerCAmelCase_ : Tuple = [] lowerCAmelCase_ : Union[str, Any] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case__): lowerCAmelCase_ : Optional[int] = _re_copy_warning.search(lines[line_index]) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = search.groups() lowerCAmelCase_ : int = find_code_in_diffusers(snake_case__) lowerCAmelCase_ : Dict = get_indent(snake_case__) lowerCAmelCase_ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 lowerCAmelCase_ : str = theoretical_indent lowerCAmelCase_ : Union[str, Any] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowerCAmelCase_ : Optional[int] = True while line_index < len(snake_case__) and should_continue: line_index += 1 if line_index >= len(snake_case__): break lowerCAmelCase_ : Dict = lines[line_index] lowerCAmelCase_ : List[str] = _should_continue(snake_case__ , snake_case__) and re.search(F'''^{indent}# End copy''' , snake_case__) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 lowerCAmelCase_ : Dict = lines[start_index:line_index] lowerCAmelCase_ : Optional[int] = "".join(snake_case__) # Remove any nested `Copied from` comments to avoid circular copies lowerCAmelCase_ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(snake_case__) is None] lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case__) > 0: lowerCAmelCase_ : List[str] = replace_pattern.replace("with" , "").split(",") lowerCAmelCase_ : Tuple = [_re_replace_pattern.search(snake_case__) for p in patterns] for pattern in patterns: if pattern is None: continue lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = pattern.groups() lowerCAmelCase_ : int = re.sub(snake_case__ , snake_case__ , snake_case__) if option.strip() == "all-casing": lowerCAmelCase_ : List[str] = re.sub(obja.lower() , obja.lower() , snake_case__) lowerCAmelCase_ : int = re.sub(obja.upper() , obja.upper() , snake_case__) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowerCAmelCase_ : List[Any] = blackify(lines[start_index - 1] + theoretical_code) lowerCAmelCase_ : Union[str, Any] = theoretical_code[len(lines[start_index - 1]) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index]) if overwrite: lowerCAmelCase_ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:] lowerCAmelCase_ : Union[str, Any] = start_index + 1 if overwrite and len(snake_case__) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''') with open(snake_case__ , "w" , encoding="utf-8" , newline="\n") as f: f.writelines(snake_case__) return diffs def UpperCamelCase ( snake_case__ = False): lowerCAmelCase_ : Tuple = glob.glob(os.path.join(snake_case__ , "**/*.py") , recursive=snake_case__) lowerCAmelCase_ : int = [] for filename in all_files: lowerCAmelCase_ : Union[str, Any] = is_copy_consistent(snake_case__ , snake_case__) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case__) > 0: lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.") if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _lowercase = parser.parse_args() check_copies(args.fix_and_overwrite)
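
# As a concrete example of what the _re_copy_warning and _re_replace_pattern regexes
# above match, a marked copy in the diffusers source looks like the sketch below; the
# class and module names here are illustrative, not real diffusers objects.
# Copied from diffusers.models.attention.SomeBlock with SomeBlock->MyBlock
class MyBlock:
    # The checker re-extracts SomeBlock, applies the SomeBlock->MyBlock rename
    # (and lowercase/uppercase variants under the "all-casing" option),
    # blackifies the result, and diffs it against this body.
    pass
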
659
1
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[Any] = logging.get_logger(__name__) def A ( snake_case :Optional[int] ) -> Any: print('Loading config file...' ) def flatten_yaml_as_dict(snake_case :Optional[int] , snake_case :List[str]="" , snake_case :str="." ): __UpperCamelCase = [] for k, v in d.items(): __UpperCamelCase = parent_key + sep + k if parent_key else k if isinstance(a_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(a_ , a_ , sep=a_ ).items() ) else: items.append((new_key, v) ) return dict(a_ ) __UpperCamelCase = argparse.Namespace() with open(a_ , 'r' ) as yaml_file: try: __UpperCamelCase = yaml.load(a_ , Loader=yaml.FullLoader ) __UpperCamelCase = flatten_yaml_as_dict(a_ ) for k, v in flat_cfg.items(): setattr(a_ , a_ , a_ ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(a_ , str(a_ ) ) ) return config def A ( snake_case :Any , snake_case :Any ) -> Tuple: __UpperCamelCase = MobileViTVaConfig() __UpperCamelCase = False # dataset if task_name.startswith('imagenet1k_' ): __UpperCamelCase = 1_0_0_0 if int(task_name.strip().split('_' )[-1] ) == 3_8_4: __UpperCamelCase = 3_8_4 else: __UpperCamelCase = 2_5_6 __UpperCamelCase = '''imagenet-1k-id2label.json''' elif task_name.startswith('imagenet21k_to_1k_' ): __UpperCamelCase = 2_1_0_0_0 if int(task_name.strip().split('_' )[-1] ) == 3_8_4: __UpperCamelCase = 3_8_4 else: __UpperCamelCase = 2_5_6 __UpperCamelCase = '''imagenet-22k-id2label.json''' elif task_name.startswith('ade20k_' ): __UpperCamelCase = 1_5_1 __UpperCamelCase = 5_1_2 __UpperCamelCase = '''ade20k-id2label.json''' __UpperCamelCase = True elif task_name.startswith('voc_' ): __UpperCamelCase = 2_1 __UpperCamelCase = 5_1_2 __UpperCamelCase = '''pascal-voc-id2label.json''' __UpperCamelCase = True # orig_config __UpperCamelCase = load_orig_config_file(a_ ) assert getattr(a_ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" __UpperCamelCase = getattr(a_ , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(a_ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" __UpperCamelCase = getattr(a_ , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: __UpperCamelCase = getattr(a_ , 'model.segmentation.output_stride' , 1_6 ) if "_deeplabv3" in task_name: __UpperCamelCase = getattr(a_ , 'model.segmentation.deeplabv3.aspp_rates' , [1_2, 2_4, 3_6] ) __UpperCamelCase = getattr(a_ , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_1_2 ) __UpperCamelCase = getattr(a_ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label __UpperCamelCase = '''huggingface/label-files''' __UpperCamelCase = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) ) __UpperCamelCase = {int(a_ ): v for k, v in idalabel.items()} __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} return config def A ( snake_case :str , snake_case :Optional[int] , snake_case :Any ) -> int: 
__UpperCamelCase = dct.pop(a_ ) __UpperCamelCase = val def A ( snake_case :str , snake_case :Dict=False ) -> int: if base_model: __UpperCamelCase = '''''' else: __UpperCamelCase = '''mobilevitv2.''' __UpperCamelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": __UpperCamelCase = k[8:] else: __UpperCamelCase = k if ".block." in k: __UpperCamelCase = k_new.replace('.block.' , '.' ) if ".conv." in k: __UpperCamelCase = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: __UpperCamelCase = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: __UpperCamelCase = k_new.replace('conv_1.' , f'{model_prefix}conv_stem.' ) for i in [1, 2]: if f'layer_{i}.' in k: __UpperCamelCase = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' ) if ".exp_1x1." in k: __UpperCamelCase = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: __UpperCamelCase = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if f'layer_{i}.0.' in k: __UpperCamelCase = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' ) if f'layer_{i}.1.local_rep.0.' in k: __UpperCamelCase = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' ) if f'layer_{i}.1.local_rep.1.' in k: __UpperCamelCase = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' ) for i in [3, 4, 5]: if i == 3: __UpperCamelCase = [0, 1] elif i == 4: __UpperCamelCase = [0, 1, 2, 3] elif i == 5: __UpperCamelCase = [0, 1, 2] for j in j_in: if f'layer_{i}.1.global_rep.{j}.' in k: __UpperCamelCase = k_new.replace( f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' ) if f'layer_{i}.1.global_rep.{j+1}.' in k: __UpperCamelCase = k_new.replace( f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' ) if f'layer_{i}.1.conv_proj.' in k: __UpperCamelCase = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' ) if "pre_norm_attn.0." in k: __UpperCamelCase = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: __UpperCamelCase = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: __UpperCamelCase = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: __UpperCamelCase = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: __UpperCamelCase = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: __UpperCamelCase = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def A ( snake_case :Tuple ) -> str: __UpperCamelCase = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' 
): keys_to_ignore.append(a_ ) for k in keys_to_ignore: state_dict.pop(a_ , a_ ) def A ( ) -> Tuple: __UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" __UpperCamelCase = Image.open(requests.get(a_ , stream=a_ ).raw ) return im @torch.no_grad() def A ( snake_case :Tuple , snake_case :str , snake_case :List[str] , snake_case :Optional[int] ) -> str: __UpperCamelCase = get_mobilevitva_config(a_ , a_ ) # load original state_dict __UpperCamelCase = torch.load(a_ , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): __UpperCamelCase = MobileViTVaForSemanticSegmentation(a_ ).eval() __UpperCamelCase = False else: __UpperCamelCase = MobileViTVaForImageClassification(a_ ).eval() __UpperCamelCase = False # remove and rename some keys of load the original model __UpperCamelCase = checkpoint remove_unused_keys(a_ ) __UpperCamelCase = create_rename_keys(a_ , base_model=a_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(a_ , a_ , a_ ) # load modified state_dict model.load_state_dict(a_ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 ) __UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt' ) __UpperCamelCase = model(**a_ ) # verify classification model if task_name.startswith('imagenet' ): __UpperCamelCase = outputs.logits __UpperCamelCase = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant __UpperCamelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ) assert torch.allclose(logits[0, :3] , a_ , atol=1e-4 ) Path(a_ ).mkdir(exist_ok=a_ ) print(f'Saving model {task_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(a_ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." 
    )

    UpperCamelCase : Tuple = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
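
# The conversion script above relies on a tiny in-place rename helper plus a large
# table of key rewrites. A minimal sketch of the rename step; the key names are
# invented for illustration.
def rename_key_demo(state_dict: dict, old_key: str, new_key: str) -> None:
    # Mirrors the helper above: pop the tensor out from under the old key
    # and re-insert it under the new one, mutating the dict in place.
    state_dict[new_key] = state_dict.pop(old_key)

sd = {"conv_1.weight": "tensor-placeholder"}
rename_key_demo(sd, "conv_1.weight", "mobilevitv2.conv_stem.weight")
print(sd)  # {'mobilevitv2.conv_stem.weight': 'tensor-placeholder'}
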
713
"""simple docstring""" import argparse import struct import unittest class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = data # Initialize hash values __UpperCamelCase = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants __UpperCamelCase = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] __UpperCamelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCAmelCase ( __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = b'\x80' + (b'\x00' * (63 - (len(__UpperCAmelCase ) + 8) % 64)) __UpperCamelCase = struct.pack('>Q' , (len(__UpperCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __UpperCamelCase = list(struct.unpack('>16L' , __UpperCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __UpperCamelCase = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) __UpperCamelCase = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) __UpperCamelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression __UpperCamelCase = self.ror(__UpperCAmelCase , 6 ) ^ self.ror(__UpperCAmelCase , 11 ) ^ self.ror(__UpperCAmelCase , 25 ) __UpperCamelCase = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) __UpperCamelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 __UpperCamelCase = self.ror(__UpperCAmelCase , 2 ) ^ self.ror(__UpperCAmelCase , 13 ) ^ self.ror(__UpperCAmelCase , 22 ) __UpperCamelCase = (a & b) ^ (a & c) ^ (b & c) __UpperCamelCase = (sa + maj) % 0x1_00_00_00_00 __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) __UpperCamelCase = [a, b, c, d, e, f, g, h] # Modify final values __UpperCamelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, 
            element in enumerate(self.hashes )
        ]
        __UpperCamelCase = ''.join([hex(__UpperCAmelCase )[2:].zfill(8 ) for value in self.hashes] )

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)


class __lowerCAmelCase ( unittest.TestCase ):
    def UpperCAmelCase ( self ):
        '''simple docstring'''
        import hashlib

        __UpperCamelCase = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(__UpperCAmelCase ).hash , hashlib.shaaaa(__UpperCAmelCase ).hexdigest() )


def A ( ) -> None:
    import doctest

    doctest.testmod()

    __UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    __UpperCamelCase = parser.parse_args()

    __UpperCamelCase = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            __UpperCamelCase = f.read()
    else:
        __UpperCamelCase = bytes(snake_case , 'utf-8' )

    print(SHAaaa(snake_case ).hash )


if __name__ == "__main__":
    main()
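
# As a quick sanity check of the implementation (the same comparison the unit test
# above performs), the digest can be checked against a standard test vector:
import hashlib

data = b"abc"
# The SHA-256 digest of b"abc" is a well-known FIPS test vector.
assert hashlib.sha256(data).hexdigest() == (
    "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
)
# The class above (SHAaaa in this sample's naming) should agree:
# SHAaaa(data).hash == hashlib.sha256(data).hexdigest()
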
293
0
from __future__ import annotations import math import random from typing import Any class __magic_name__ : def __init__( self : List[Any] ): UpperCAmelCase = [] UpperCAmelCase = 0 UpperCAmelCase = 0 def _UpperCAmelCase ( self : Union[str, Any] ): return self.head == self.tail def _UpperCAmelCase ( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : Any ): self.data.append(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = self.tail + 1 def _UpperCAmelCase ( self : Tuple ): UpperCAmelCase = self.data[self.head] UpperCAmelCase = self.head + 1 return ret def _UpperCAmelCase ( self : List[str] ): return self.tail - self.head def _UpperCAmelCase ( self : Tuple ): print(self.data ) print("**************" ) print(self.data[self.head : self.tail] ) class __magic_name__ : def __init__( self : Tuple ,__SCREAMING_SNAKE_CASE : Any ): UpperCAmelCase = data UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = 1 def _UpperCAmelCase ( self : Optional[int] ): return self.data def _UpperCAmelCase ( self : List[str] ): return self.left def _UpperCAmelCase ( self : Union[str, Any] ): return self.right def _UpperCAmelCase ( self : Any ): return self.height def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Any ): UpperCAmelCase = data def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : MyNode | None ): UpperCAmelCase = node def _UpperCAmelCase ( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : MyNode | None ): UpperCAmelCase = node def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : int ): UpperCAmelCase = height def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" if node is None: return 0 return node.get_height() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" if a > b: return a return b def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" print("left rotation node:" , node.get_data() ) UpperCAmelCase = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" print("right rotation node:" , node.get_data() ) UpperCAmelCase = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" UpperCAmelCase = node.get_left() assert left_child is not None node.set_left(left_rotation(_lowerCAmelCase ) ) return right_rotation(_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" UpperCAmelCase = node.get_right() assert right_child is not None node.set_right(right_rotation(_lowerCAmelCase ) ) return left_rotation(_lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" if node is None: return MyNode(_lowerCAmelCase ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _lowerCAmelCase ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected UpperCAmelCase = node.get_left() 
assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child UpperCAmelCase = right_rotation(_lowerCAmelCase ) else: UpperCAmelCase = lr_rotation(_lowerCAmelCase ) else: node.set_right(insert_node(node.get_right() , _lowerCAmelCase ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: UpperCAmelCase = node.get_right() assert right_child is not None if data < right_child.get_data(): UpperCAmelCase = rl_rotation(_lowerCAmelCase ) else: UpperCAmelCase = left_rotation(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) return node def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" while True: UpperCAmelCase = root.get_right() if right_child is None: break UpperCAmelCase = right_child return root.get_data() def __UpperCamelCase ( _lowerCAmelCase ): """simple docstring""" while True: UpperCAmelCase = root.get_left() if left_child is None: break UpperCAmelCase = left_child return root.get_data() def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" UpperCAmelCase = root.get_left() UpperCAmelCase = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: UpperCAmelCase = get_left_most(_lowerCAmelCase ) root.set_data(_lowerCAmelCase ) root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) elif left_child is not None: UpperCAmelCase = left_child elif right_child is not None: UpperCAmelCase = right_child else: return None elif root.get_data() > data: if left_child is None: print("No such data" ) return root else: root.set_left(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) if get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): UpperCAmelCase = left_rotation(_lowerCAmelCase ) else: UpperCAmelCase = rl_rotation(_lowerCAmelCase ) elif get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): UpperCAmelCase = right_rotation(_lowerCAmelCase ) else: UpperCAmelCase = lr_rotation(_lowerCAmelCase ) UpperCAmelCase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(_lowerCAmelCase ) return root class __magic_name__ : def __init__( self : Tuple ): UpperCAmelCase = None def _UpperCAmelCase ( self : Optional[int] ): return get_height(self.root ) def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : Any ): print("insert:" + str(__SCREAMING_SNAKE_CASE ) ) UpperCAmelCase = insert_node(self.root ,__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Any ): print("delete:" + str(__SCREAMING_SNAKE_CASE ) ) if self.root is None: print("Tree is empty!" 
            )
            return
        UpperCAmelCase = del_node(self.root ,__SCREAMING_SNAKE_CASE )

    def __str__( self : Dict ,):  # a level traversal, gives a more intuitive look on the tree
        UpperCAmelCase = ""
        UpperCAmelCase = MyQueue()
        q.push(self.root )
        UpperCAmelCase = self.get_height()
        if layer == 0:
            return output
        UpperCAmelCase = 0
        while not q.is_empty():
            UpperCAmelCase = q.pop()
            UpperCAmelCase = " " * int(math.pow(2 ,layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(__SCREAMING_SNAKE_CASE )
                q.push(__SCREAMING_SNAKE_CASE )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            UpperCAmelCase = cnt + 1
            for i in range(1_0_0 ):
                if cnt == math.pow(2 ,__SCREAMING_SNAKE_CASE ) - 1:
                    UpperCAmelCase = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def __UpperCamelCase ( ):
    """simple docstring"""
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    __lowerCAmelCase =AVLtree()
    __lowerCAmelCase =list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
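
# A minimal usage sketch of the AVLtree class above. The names AVLtree, insert, and
# del_node are the ones the sample's own __main__ driver calls; insert prints a line
# per key and rebalances via the rotation helpers, keeping the height logarithmic.
t = AVLtree()
for value in (3, 1, 4, 5, 9, 2, 6):
    t.insert(value)
print(str(t))  # level-order rendering of the balanced tree
t.del_node(4)  # deletion also rebalances on the way back up
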
333
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : def __init__( self : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str=1_3 ,__SCREAMING_SNAKE_CASE : Optional[Any]=7 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : Tuple=9_9 ,__SCREAMING_SNAKE_CASE : str=3_2 ,__SCREAMING_SNAKE_CASE : Any=2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=4 ,__SCREAMING_SNAKE_CASE : Tuple=3_7 ,__SCREAMING_SNAKE_CASE : List[str]="gelu" ,__SCREAMING_SNAKE_CASE : List[Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,__SCREAMING_SNAKE_CASE : Tuple=5_1_2 ,__SCREAMING_SNAKE_CASE : Dict=1_6 ,__SCREAMING_SNAKE_CASE : Tuple=2 ,__SCREAMING_SNAKE_CASE : List[str]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[Any]=3 ,__SCREAMING_SNAKE_CASE : Dict=4 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : Dict=1_0_0_0 ,): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope UpperCAmelCase = range_bbox def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) # convert bbox to numpy since TF does not support item assignment UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase = bbox[i, j, 3] UpperCAmelCase = bbox[i, j, 1] UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase = bbox[i, j, 2] UpperCAmelCase = bbox[i, j, 0] UpperCAmelCase = t UpperCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = 
ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ): UpperCAmelCase = TFLayoutLMModel(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ): UpperCAmelCase = TFLayoutLMForMaskedLM(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = TFLayoutLMForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self : List[str] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ): UpperCAmelCase = self.num_labels UpperCAmelCase = TFLayoutLMForTokenClassification(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : str ): UpperCAmelCase = TFLayoutLMForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self : List[Any] ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( _a , _a , unittest.TestCase): _UpperCAmelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCAmelCase : str = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase : Tuple = False _UpperCAmelCase : int = True _UpperCAmelCase : Union[str, Any] = 10 def _UpperCAmelCase ( self : Tuple ): UpperCAmelCase = TFLayoutLMModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=3_7 ) def _UpperCAmelCase ( self : List[str] ): self.config_tester.run_common_tests() def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Any ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : List[str] ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFLayoutLMModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def _UpperCAmelCase ( self : List[str] ): pass def __UpperCamelCase ( ): """simple docstring""" UpperCAmelCase = 
tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231 UpperCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) UpperCAmelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase): @slow def _UpperCAmelCase ( self : Any ): UpperCAmelCase = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) # test the sequence output on [0, :3, :3] UpperCAmelCase = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] ,) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) ) # test the pooled output on [1, :3] UpperCAmelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) ) @slow def _UpperCAmelCase ( self : Union[str, Any] ): # initialize model with randomly initialized sequence classification head UpperCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=2 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model( input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=tf.convert_to_tensor([1, 1] ) ,) # test whether we get a loss as a scalar UpperCAmelCase = outputs.loss UpperCAmelCase = (2,) self.assertEqual(loss.shape ,__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = outputs.logits UpperCAmelCase = (2, 2) self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : Tuple ): # initialize model with randomly initialized token classification head UpperCAmelCase = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=1_3 ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model( input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = outputs.logits UpperCAmelCase = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : List[Any] ): # initialize model with randomly initialized token classification head UpperCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ) # test the shape of the logits UpperCAmelCase = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape 
,__SCREAMING_SNAKE_CASE ) self.assertEqual(outputs.end_logits.shape ,__SCREAMING_SNAKE_CASE )
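# --- Editor's usage sketch (illustrative, not part of the original test file) ---
# The tests above all exercise the same input contract: LayoutLM takes one
# [x0, y0, x1, y1] bounding box per token, normalized to a 0-1000 page coordinate
# space, alongside the usual ids and masks. A minimal forward pass, assuming
# TensorFlow and the public "microsoft/layoutlm-base-uncased" checkpoint:
import tensorflow as tf
from transformers import TFLayoutLMModel

model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids = tf.constant([[101, 2054, 2003, 1996, 4107, 102]])  # batch of 1, 6 tokens
bbox = tf.constant([[[0, 0, 0, 0]] * 6])  # one (dummy) box per token
attention_mask = tf.ones_like(input_ids)
outputs = model(input_ids, bbox=bbox, attention_mask=attention_mask)
print(outputs.last_hidden_state.shape)  # (1, 6, 768) for the base checkpoint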
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=18 , lowerCAmelCase_=30 , lowerCAmelCase_=4_00 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=[0.5, 0.5, 0.5] , ): '''simple docstring''' a_ : Optional[int] = parent a_ : Union[str, Any] = batch_size a_ : Any = num_channels a_ : Tuple = image_size a_ : Any = min_resolution a_ : Dict = max_resolution a_ : Optional[Any] = do_resize a_ : Tuple = size if size is not None else {"""height""": 18, """width""": 20} a_ : int = do_thumbnail a_ : Optional[int] = do_align_axis a_ : Tuple = do_pad a_ : int = do_normalize a_ : Tuple = image_mean a_ : Optional[Any] = image_std def _lowerCAmelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _UpperCAmelCase ( lowerCAmelCase__ ,unittest.TestCase ): """simple docstring""" a_ = DonutImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Optional[int] = DonutImageProcessingTester(self ) @property def _lowerCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """do_thumbnail""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """do_align_long_axis""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """do_pad""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , """image_std""" ) ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} ) a_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order a_ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} ) def _lowerCAmelCase ( self ): '''simple docstring''' pass @is_flaky() def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ : str = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , Image.Image ) # Test not batched input a_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched a_ : int = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def _lowerCAmelCase ( self ): '''simple docstring''' a_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , np.ndarray ) # Test not batched input a_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched a_ : Dict = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) @is_flaky() def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) # Test not batched input a_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched a_ : Any = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
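# --- Editor's usage sketch (illustrative, not part of the original test file) ---
# The three flaky tests above assert the same behavior for three input types; in
# one concrete call, the processor maps a PIL / numpy / torch image to a fixed
# (batch, channels, height, width) tensor. The printed shape assumes the default
# thumbnail + pad pipeline and is illustrative.
import numpy as np
from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)  # channels-first input
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])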
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
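# --- Editor's note (illustrative sketch, standard library only) ---
# The init above follows transformers' lazy-import pattern: _import_structure maps
# submodule names to the symbols they export, and nothing heavy is imported until
# an attribute is first touched. A loose, self-contained sketch of the same idea
# (the real _LazyModule is more elaborate):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve exported symbols on first attribute access instead of at import."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_submodule:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._symbol_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)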
'''simple docstring'''
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
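# --- Editor's note (illustrative invocation; paths and script filename are placeholders) ---
# The merge above folds every LoRA pair back into the frozen weight as
#   W = W0 + alpha * (up @ down)
# which is why only curr_layer.weight.data is modified and no new modules appear in
# the saved pipeline. A hypothetical command line for the script:
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora.safetensors \
#       --dump_path ./merged-model \
#       --alpha 0.75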
'''simple docstring'''
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
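# --- Editor's usage sketch (illustrative) ---
# radix_sort sorts the list in place (and also returns it); it assumes non-negative
# integers and runs in O(d * (n + RADIX)) time, where d is the digit count of the
# largest value.
if __name__ == "__main__":
    example = [170, 45, 75, 90, 802, 24, 2, 66]
    print(radix_sort(example))  # [2, 24, 45, 66, 75, 90, 170, 802]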
import enum
import shutil
import sys

TERMINAL_WIDTH, TERMINAL_HEIGHT = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
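# --- Editor's usage sketch (illustrative; assumes an ANSI-capable terminal) ---
# writeColor wraps its content in an SGR color escape, and move_cursor emits the
# CSI codes mapped in CURSOR_TO_CHAR (A/B/C/D = up/down/right/left).
if __name__ == "__main__":
    writeColor("highlighted entry", 32, end="\n")  # 32 = ANSI green
    forceWrite("plain entry", end="\n")
    linebreak()
    forceWrite("\n")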
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''owlvit_text_model''' def __init__( self , _a=49408 , _a=512 , _a=2048 , _a=12 , _a=8 , _a=16 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=0 , _a=49406 , _a=49407 , **_a , ) -> List[str]: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = hidden_act lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = initializer_range lowerCAmelCase_ = initializer_factor @classmethod def __a ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": lowerCAmelCase_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_a , **_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''owlvit_vision_model''' def __init__( self , _a=768 , _a=3072 , _a=12 , _a=12 , _a=3 , _a=768 , _a=32 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , **_a , ) -> Union[str, Any]: super().__init__(**_a ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = initializer_range lowerCAmelCase_ = initializer_factor @classmethod def __a ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": lowerCAmelCase_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(_a , **_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''owlvit''' lowerCamelCase__ = True def __init__( self , _a=None , _a=None , _a=512 , _a=2.6_5_9_2 , _a=True , **_a , ) -> Tuple: super().__init__(**_a ) if text_config is None: lowerCAmelCase_ = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: lowerCAmelCase_ = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) lowerCAmelCase_ = OwlViTTextConfig(**_a ) lowerCAmelCase_ = OwlViTVisionConfig(**_a ) lowerCAmelCase_ = projection_dim lowerCAmelCase_ = logit_scale_init_value lowerCAmelCase_ = return_dict lowerCAmelCase_ = 1.0 @classmethod def __a ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_a , **_a ) @classmethod def __a ( cls , _a , _a , **_a ) -> Union[str, Any]: lowerCAmelCase_ = {} lowerCAmelCase_ = text_config lowerCAmelCase_ = vision_config return cls.from_dict(_a , **_a ) def __a ( self ) -> int: lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.text_config.to_dict() lowerCAmelCase_ = self.vision_config.to_dict() lowerCAmelCase_ = self.__class__.model_type return output class __magic_name__ (__lowercase ): @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def __a ( self ) -> float: return 1E-4 def __a ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Mapping[str, Any]: lowerCAmelCase_ = super().generate_dummy_inputs( processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a ) lowerCAmelCase_ = super().generate_dummy_inputs( processor.image_processor , batch_size=_a , framework=_a ) return {**text_input_dict, **image_input_dict} @property def __a ( self ) -> int: return 14
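# --- Editor's usage sketch (illustrative) ---
# A composite config can be assembled from the two sub-configs. The names below are
# the upstream transformers names (OwlViTTextConfig / OwlViTVisionConfig /
# OwlViTConfig, with the from_text_vision_configs classmethod), which this dump has
# obfuscated, so the snippet assumes the upstream module rather than the file above.
if __name__ == "__main__":
    from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

    text_config = OwlViTTextConfig(vocab_size=49408)
    vision_config = OwlViTVisionConfig(image_size=768, patch_size=32)
    config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
    print(config.text_config.vocab_size, config.vision_config.patch_size)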
def apply_table(inp, table):
    """simple docstring"""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """simple docstring"""
    return data[1:] + data[0]


def xor(a, b):
    """simple docstring"""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """simple docstring"""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (same rounds, subkeys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """simple docstring"""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
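# --- Editor's usage sketch (illustrative; not invoked automatically) ---
def demo_linked_list() -> None:
    """Minimal tour of the API that the tests above exercise at length."""
    demo = LinkedList()
    for value in (3, 1, 2):
        demo.insert_tail(value)  # builds 3->1->2
    demo.reverse()
    assert str(demo) == "2->1->3"
    assert demo.delete_head() == 2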
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : str = '''▁''' UpperCAmelCase_ : str = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } UpperCAmelCase_ : List[str] = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } UpperCAmelCase_ : Any = { '''facebook/s2t-small-librispeech-asr''': 10_24, } UpperCAmelCase_ : Tuple = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] UpperCAmelCase_ : int = {'''mustc''': MUSTC_LANGS} class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Optional[int] = VOCAB_FILES_NAMES snake_case__ : Tuple = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Union[str, Any] = MAX_MODEL_INPUT_SIZES snake_case__ : Optional[int] = ["""input_ids""", """attention_mask"""] snake_case__ : List[int] = [] def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : int , ): UpperCamelCase :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_upper_case=__lowerCamelCase , do_lower_case=__lowerCamelCase , tgt_lang=__lowerCamelCase , lang_codes=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) UpperCamelCase :List[str] = do_upper_case UpperCamelCase :int = do_lower_case UpperCamelCase :Dict = load_json(__lowerCamelCase ) UpperCamelCase :Optional[int] = {v: k for k, v in self.encoder.items()} UpperCamelCase :Optional[Any] = spm_file UpperCamelCase :str = load_spm(__lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: UpperCamelCase :Dict = lang_codes UpperCamelCase :List[str] = LANGUAGES[lang_codes] UpperCamelCase :List[Any] = [F"""<lang:{lang}>""" for lang in self.langs] UpperCamelCase :int = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs} UpperCamelCase :Union[str, Any] = self.lang_tokens UpperCamelCase :Tuple = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: UpperCamelCase :Optional[Any] = {} @property def _A ( self : Any ): return len(self.encoder ) @property def _A ( self : int ): return self._tgt_lang @tgt_lang.setter def _A ( self : Union[str, Any] , __lowerCamelCase : int ): UpperCamelCase :str = new_tgt_lang self.set_tgt_lang_special_tokens(__lowerCamelCase ) def _A ( self : Dict , __lowerCamelCase : str ): UpperCamelCase :int = self.lang_code_to_id[tgt_lang] UpperCamelCase :Optional[int] = [lang_code_id] def _A ( self : Union[str, Any] , __lowerCamelCase : str ): return self.sp_model.encode(__lowerCamelCase , 
out_type=__lowerCamelCase ) def _A ( self : Optional[int] , __lowerCamelCase : List[str] ): return self.encoder.get(__lowerCamelCase , self.encoder[self.unk_token] ) def _A ( self : Optional[int] , __lowerCamelCase : int ): return self.decoder.get(__lowerCamelCase , self.unk_token ) def _A ( self : Union[str, Any] , __lowerCamelCase : List[str] ): UpperCamelCase :Any = [] UpperCamelCase :Dict = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: UpperCamelCase :Dict = self.sp_model.decode(__lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " UpperCamelCase :Dict = [] else: current_sub_tokens.append(__lowerCamelCase ) UpperCamelCase :Dict = self.sp_model.decode(__lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _A ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _A ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) UpperCamelCase :Tuple = [1] * len(self.prefix_tokens ) UpperCamelCase :Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones def _A ( self : Any ): UpperCamelCase :Optional[Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): UpperCamelCase :Optional[int] = self.__dict__.copy() UpperCamelCase :List[str] = None return state def __setstate__( self : int , __lowerCamelCase : Dict ): UpperCamelCase :List[str] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCamelCase :int = {} UpperCamelCase :Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs ) def _A ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): UpperCamelCase :Union[str, Any] = Path(__lowerCamelCase ) assert save_dir.is_dir(), F"""{save_directory} should be a directory""" UpperCamelCase :Any = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) UpperCamelCase :str = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCamelCase , """wb""" ) as fi: UpperCamelCase :Any = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (str(__lowerCamelCase ), str(__lowerCamelCase )) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" UpperCamelCase :int = sentencepiece.SentencePieceProcessor(**__magic_name__ 
) spm.Load(str(__magic_name__ ) ) return spm def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> Union[Dict, List]: """simple docstring""" with open(__magic_name__ , """r""" ) as f: return json.load(__magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : str ) -> None: """simple docstring""" with open(__magic_name__ , """w""" ) as f: json.dump(__magic_name__ , __magic_name__ , indent=2 )
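# --- Editor's usage sketch (illustrative) ---
# The helpers above (load_spm / load_json / save_json) are the tokenizer's whole
# persistence story: the vocab is a plain JSON mapping and the subword model is a
# serialized SentencePiece file. Assuming the upstream class name and the public
# checkpoint below:
if __name__ == "__main__":
    from transformers import Speech2TextTokenizer

    tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
    ids = tokenizer("hello world").input_ids
    print(ids, tokenizer.decode(ids, skip_special_tokens=True))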
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): lowercase__ = GPTaTokenizer lowercase__ = GPTaTokenizerFast lowercase__ = True lowercase__ = {"""add_prefix_space""": True} lowercase__ = False def _UpperCAmelCase ( self : List[Any]): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] lowercase_ = dict(zip(lowercase__ , range(len(lowercase__)))) lowercase_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase_ = {"""unk_token""": """<unk>"""} lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(lowercase__) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(lowercase__)) def _UpperCAmelCase ( self : Any , **lowerCAmelCase_ : int): """simple docstring""" kwargs.update(self.special_tokens_map) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase__) def _UpperCAmelCase ( self : List[str] , **lowerCAmelCase_ : Tuple): """simple docstring""" kwargs.update(self.special_tokens_map) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = """lower newer""" lowercase_ = """lower newer""" return input_text, output_text def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) lowercase_ = """lower newer""" lowercase_ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowercase_ = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__) self.assertListEqual(lowercase__ , lowercase__) lowercase_ = tokens + [tokenizer.unk_token] lowercase_ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , lowercase__) def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" if not self.test_rust_tokenizer: return lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer(add_prefix_space=lowercase__) lowercase_ = """lower newer""" # Testing tokenization lowercase_ = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__) lowercase_ = rust_tokenizer.tokenize(lowercase__) self.assertListEqual(lowercase__ , lowercase__) # Testing conversion to ids without special tokens lowercase_ = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__) lowercase_ = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__) self.assertListEqual(lowercase__ , lowercase__) # Testing conversion to ids with special tokens lowercase_ = 
self.get_rust_tokenizer(add_prefix_space=lowercase__) lowercase_ = tokenizer.encode(lowercase__ , add_prefix_space=lowercase__) lowercase_ = rust_tokenizer.encode(lowercase__) self.assertListEqual(lowercase__ , lowercase__) # Testing the unknown token lowercase_ = tokens + [rust_tokenizer.unk_token] lowercase_ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__) , lowercase__) def _UpperCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Any): """simple docstring""" pass def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str]=1_5): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''): lowercase_ = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__) # Simple input lowercase_ = """This is a simple input""" lowercase_ = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase_ = ("""This is a simple input""", """This is a pair""") lowercase_ = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="""max_length""") # Simple input self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="""max_length""") # Simple input self.assertRaises( lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="""max_length""" , ) # Pair input self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="""max_length""") # Pair input self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="""max_length""") # Pair input self.assertRaises( lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="""max_length""" , ) def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""") # Simple input lowercase_ = """This is a simple input""" lowercase_ = ["""This is a simple input looooooooong""", """This is a simple input"""] lowercase_ = ("""This is a simple input""", """This is a pair""") lowercase_ = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] lowercase_ = tokenizer.pad_token_id lowercase_ = tokenizer(lowercase__ , padding="""max_length""" , max_length=3_0 , return_tensors="""np""") lowercase_ = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="""np""") lowercase_ = tokenizer(*lowercase__ , padding="""max_length""" , max_length=6_0 , return_tensors="""np""") lowercase_ = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="""np""") # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0) self.assertTrue(pad_token_id in out_s["""input_ids"""]) self.assertTrue(0 in out_s["""attention_mask"""]) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0]) self.assertFalse(0 in out_sa["""attention_mask"""][0]) # short slice does have padding 
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1]) self.assertTrue(0 in out_sa["""attention_mask"""][1]) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0) self.assertTrue(pad_token_id in out_p["""input_ids"""]) self.assertTrue(0 in out_p["""attention_mask"""]) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0]) self.assertFalse(0 in out_pa["""attention_mask"""][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1]) self.assertTrue(0 in out_pa["""attention_mask"""][1]) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = """$$$""" lowercase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase__ , add_bos_token=lowercase__) lowercase_ = """This is a simple input""" lowercase_ = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase_ = tokenizer.bos_token_id lowercase_ = tokenizer(lowercase__) lowercase_ = tokenizer(lowercase__) self.assertEqual(out_s.input_ids[0] , lowercase__) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids)) lowercase_ = tokenizer.decode(out_s.input_ids) lowercase_ = tokenizer.batch_decode(out_sa.input_ids) self.assertEqual(decode_s.split()[0] , lowercase__) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa)) def _UpperCAmelCase ( self : Any): """simple docstring""" pass def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = [self.get_tokenizer(do_lower_case=lowercase__ , add_bos_token=lowercase__)] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}'''): lowercase_ = """Encode this.""" lowercase_ = """This one too please.""" lowercase_ = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__) encoded_sequence += tokenizer.encode(lowercase__ , add_special_tokens=lowercase__) lowercase_ = tokenizer.encode_plus( lowercase__ , lowercase__ , add_special_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , ) lowercase_ = encoded_sequence_dict["""input_ids"""] lowercase_ = encoded_sequence_dict["""special_tokens_mask"""] self.assertEqual(len(lowercase__) , len(lowercase__)) lowercase_ = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase__) ] lowercase_ = [x for x in filtered_sequence if x is not None] self.assertEqual(lowercase__ , lowercase__) @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase__) lowercase_ = """A photo of a cat""" lowercase_ = tokenizer.encode( lowercase__ , ) self.assertEqual(lowercase__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8]) tokenizer.save_pretrained("""test_opt""") lowercase_ = AutoTokenizer.from_pretrained("""./test_opt""") lowercase_ = tokenizer.encode( lowercase__ , ) self.assertEqual(lowercase__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8]) def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" lowercase_ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowercase__) lowercase_ = """A photo of a cat""" lowercase_ = tokenizer.encode( lowercase__ , ) # Same as above self.assertEqual(lowercase__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8]) @unittest.skip("""This test is failing because of a bug in the fast tokenizer""") def _UpperCAmelCase ( 
self : Union[str, Any]): """simple docstring""" lowercase_ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase__) lowercase_ = """bos""" lowercase_ = tokenizer.get_vocab()["""bos"""] lowercase_ = """A photo of a cat""" lowercase_ = tokenizer.encode( lowercase__ , ) # We changed the bos token self.assertEqual(lowercase__ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8]) tokenizer.save_pretrained("""./tok""") lowercase_ = AutoTokenizer.from_pretrained("""./tok""") self.assertTrue(tokenizer.is_fast) lowercase_ = tokenizer.encode( lowercase__ , ) self.assertEqual(lowercase__ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
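# --- Editor's usage sketch (illustrative) ---
# The padding tests above hinge on GPT-2 shipping without a pad token; the tests
# inject pad_token="<pad>", and the equivalent everyday recipe is to reuse EOS:
if __name__ == "__main__":
    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    tok.pad_token = tok.eos_token  # give padding a valid id without resizing the vocab
    batch = tok(["short", "a slightly longer input"], padding=True, return_tensors="np")
    print(batch["input_ids"].shape, batch["attention_mask"])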
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[str]] , SCREAMING_SNAKE_CASE__ : int , ): """simple docstring""" snake_case_ : Any = len(SCREAMING_SNAKE_CASE__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(SCREAMING_SNAKE_CASE__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : list[list[str]] = [] depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Print all the boards for board in boards: for column in board: print(SCREAMING_SNAKE_CASE__ ) print("""""" ) print(len(SCREAMING_SNAKE_CASE__ ) , """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
import argparse import os import re snake_case__ : Dict = 'src/diffusers' # Pattern that looks at the indentation in a line. snake_case__ : int = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. snake_case__ : Optional[int] = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. snake_case__ : Optional[int] = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. snake_case__ : List[Any] = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. snake_case__ : Optional[Any] = re.compile(R'\[([^\]]+)\]') def __lowerCamelCase ( A__ : Any ) -> Tuple: lowerCamelCase_ : List[str] = _re_indent.search(A__ ) return "" if search is None else search.groups()[0] def __lowerCamelCase ( A__ : Optional[int] , A__ : Optional[Any]="" , A__ : Union[str, Any]=None , A__ : str=None ) -> List[str]: lowerCamelCase_ : List[str] = 0 lowerCamelCase_ : Tuple = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(A__ ): index += 1 lowerCamelCase_ : Union[str, Any] = ["""\n""".join(lines[:index] )] else: lowerCamelCase_ : str = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCamelCase_ : Any = [lines[index]] index += 1 while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(A__ ) ) if index < len(A__ ) - 1: lowerCamelCase_ : Optional[Any] = [lines[index + 1]] index += 1 else: lowerCamelCase_ : List[Any] = [] else: blocks.append("""\n""".join(A__ ) ) lowerCamelCase_ : Optional[Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(A__ ) > 0: blocks.append("""\n""".join(A__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(A__ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def __lowerCamelCase ( A__ : Union[str, Any] ) -> int: def _inner(A__ : str ): return key(A__ ).lower().replace("""_""" , """""" ) return _inner def __lowerCamelCase ( A__ : Union[str, Any] , A__ : str=None ) -> Tuple: # If no key is provided, we use a noop. def noop(A__ : Dict ): return x if key is None: lowerCamelCase_ : int = noop # Constants are all uppercase, they go first. lowerCamelCase_ : str = [obj for obj in objects if key(A__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCamelCase_ : Any = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()] # Functions begin with a lowercase, they go last. lowerCamelCase_ : Optional[int] = [obj for obj in objects if not key(A__ )[0].isupper()] lowerCamelCase_ : List[str] = ignore_underscore(A__ ) return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) def __lowerCamelCase ( A__ : Optional[Any] ) -> int: # This inner function sort imports between [ ]. def _replace(A__ : Union[str, Any] ): lowerCamelCase_ : List[Any] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowerCamelCase_ : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: lowerCamelCase_ : List[str] = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(A__ )] ) + "]" lowerCamelCase_ : Any = import_statement.split("""\n""" ) if len(A__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCamelCase_ : Any = 2 if lines[1].strip() == """[""" else 1 lowerCamelCase_ : List[Any] = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCamelCase_ : Any = sort_objects(A__ , key=lambda A__ : x[1] ) lowerCamelCase_ : List[str] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(A__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCamelCase_ : Tuple = _re_bracket_content.sub(_replace , lines[1] ) else: lowerCamelCase_ : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCamelCase_ : Tuple = keys[:-1] lowerCamelCase_ : List[str] = get_indent(lines[1] ) + """, """.join([f'''"{k}"''' for k in sort_objects(A__ )] ) return "\n".join(A__ ) else: # Finally we have to deal with imports fitting on one line lowerCamelCase_ : Optional[int] = _re_bracket_content.sub(_replace , A__ ) return import_statement def __lowerCamelCase ( A__ : List[Any] , A__ : List[Any]=True ) -> Union[str, Any]: with open(A__ , """r""" ) as f: lowerCamelCase_ : List[str] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCamelCase_ : Any = split_code_in_indented_blocks( A__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(A__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCamelCase_ : Optional[int] = main_blocks[block_idx] lowerCamelCase_ : Dict = block.split("""\n""" ) # Get to the start of the imports. lowerCamelCase_ : Optional[int] = 0 while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCamelCase_ : Optional[int] = len(A__ ) else: line_idx += 1 if line_idx >= len(A__ ): continue # Ignore beginning and last line: they don't contain anything. lowerCamelCase_ : int = """\n""".join(block_lines[line_idx:-1] ) lowerCamelCase_ : str = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCamelCase_ : str = split_code_in_indented_blocks(A__ , indent_level=A__ ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCamelCase_ : Any = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCamelCase_ : Optional[Any] = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. 
lowerCamelCase_ : Optional[Any] = [(i, key) for i, key in enumerate(A__ ) if key is not None] lowerCamelCase_ : str = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCamelCase_ : int = 0 lowerCamelCase_ : Tuple = [] for i in range(len(A__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCamelCase_ : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(A__ ) count += 1 # And we put our main block back together with its first and last line. lowerCamelCase_ : Union[str, Any] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(A__ ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(A__ , """w""" ) as f: f.write("""\n""".join(A__ ) ) def __lowerCamelCase ( A__ : Optional[int]=True ) -> Union[str, Any]: lowerCamelCase_ : Optional[Any] = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: lowerCamelCase_ : int = sort_imports(os.path.join(A__ , """__init__.py""" ) , check_only=A__ ) if result: lowerCamelCase_ : int = [os.path.join(A__ , """__init__.py""" )] if len(A__ ) > 0: raise ValueError(f'''Would overwrite {len(A__ )} files, run `make style`.''' ) if __name__ == "__main__": snake_case__ : Dict = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') snake_case__ : Dict = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
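# Behaviour sketch for the sort helpers above, assuming the obfuscated
# definitions match diffusers' utils/custom_init_isort.py (where the no-op key
# returns its argument rather than the undefined `x` seen above): constants
# sort first, then classes, then functions, case-insensitively and ignoring
# leading underscores.
print(sort_objects(["my_function", "MyClass", "SOME_CONSTANT", "_private_helper", "Other"]))
# ['SOME_CONSTANT', 'MyClass', 'Other', 'my_function', '_private_helper']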
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    # One classical bit per qubit, to store the measurement results
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
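# Expected behaviour (illustrative, not an exact output): the circuit prepares
# the GHZ state (|000> + |111>)/sqrt(2) for three qubits, so the 1000 shots
# split roughly evenly between the all-zeros and all-ones bitstrings.
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}  # e.g. {'000': 507, '111': 493}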
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE_ = 'FlavaImageProcessor' SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE_ , ) lowerCamelCase_ = kwargs.pop('feature_extractor' ) lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' 
) if text is not None: lowerCamelCase_ = self.tokenizer( text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) if images is not None: lowerCamelCase_ = self.image_processor( SCREAMING_SNAKE_CASE_ , return_image_mask=SCREAMING_SNAKE_CASE_ , return_codebook_pixels=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) if text is not None and images is not None: encoding.update(SCREAMING_SNAKE_CASE_ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]: '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def UpperCamelCase( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.tokenizer.model_input_names lowerCamelCase_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor_class @property def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor
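# Usage sketch for the processor above (transformers' FlavaProcessor; the
# checkpoint name is the public FLAVA release, and the printed keys are
# illustrative).
from PIL import Image
from transformers import FlavaProcessor

flava_processor = FlavaProcessor.from_pretrained("facebook/flava-full")
dummy_image = Image.new("RGB", (224, 224))
flava_inputs = flava_processor(text=["a photo of a cat"], images=dummy_image, return_tensors="pt")
print(sorted(flava_inputs.keys()))  # includes 'input_ids', 'attention_mask', 'pixel_values'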
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json() lowerCamelCase__ = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__lowercase ): lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json() lowerCamelCase__ = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(__lowercase ): lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def _A ( __lowercase , __lowercase , __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = None if token is not None: lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase , allow_redirects=__lowercase ) lowerCamelCase__ = result.headers["""Location"""] lowerCamelCase__ = requests.get(__lowercase , allow_redirects=__lowercase ) lowerCamelCase__ = os.path.join(__lowercase , f"""{artifact_name}.zip""" ) with open(__lowercase , """wb""" ) as fp: fp.write(response.content ) def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = None with zipfile.ZipFile(__lowercase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowercase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__lowercase ) as f: for line in f: lowerCamelCase__ = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs lowerCamelCase__ = line[: line.index(""": """ )] lowerCamelCase__ = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the 
test method that failed lowerCamelCase__ = line[len("""FAILED """ ) :] failed_tests.append(__lowercase ) elif filename == "job_name.txt": lowerCamelCase__ = line if len(__lowercase ) != len(__lowercase ): raise ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase )} for `errors` """ f"""and {len(__lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" """ problem.""" ) lowerCamelCase__ = None if job_name and job_links: lowerCamelCase__ = job_links.get(__lowercase , __lowercase ) # A list with elements of the form (line of error, error, failed test) lowerCamelCase__ = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase )] return result def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = [] lowerCamelCase__ = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase ) ) return errors def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = Counter() counter.update([x[1] for x in logs] ) lowerCamelCase__ = counter.most_common() lowerCamelCase__ = {} for error, count in counts: if error_filter is None or error not in error_filter: lowerCamelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) ) return r def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): lowerCamelCase__ = test.split("""/""" )[2] else: lowerCamelCase__ = None return test def _A ( __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs] lowerCamelCase__ = [x for x in logs if x[2] is not None] lowerCamelCase__ = {x[2] for x in logs} lowerCamelCase__ = {} for test in tests: lowerCamelCase__ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) lowerCamelCase__ = counter.most_common() lowerCamelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} lowerCamelCase__ = sum(error_counts.values() ) if n_errors > 0: lowerCamelCase__ = {"""count""": n_errors, """errors""": error_counts} lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) ) return r def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = """| no. | error | status |""" lowerCamelCase__ = """|-:|:-|:-|""" lowerCamelCase__ = [header, sep] for error in reduced_by_error: lowerCamelCase__ = reduced_by_error[error]["""count"""] lowerCamelCase__ = f"""| {count} | {error[:100]} | |""" lines.append(__lowercase ) return "\n".join(__lowercase ) def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = """| model | no. 
of errors | major error | count |""" lowerCamelCase__ = """|-:|-:|-:|-:|""" lowerCamelCase__ = [header, sep] for model in reduced_by_model: lowerCamelCase__ = reduced_by_model[model]["""count"""] lowerCamelCase__ , lowerCamelCase__ = list(reduced_by_model[model]["""errors"""].items() )[0] lowerCamelCase__ = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(__lowercase ) return "\n".join(__lowercase ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") __magic_name__ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __magic_name__ = get_job_links(args.workflow_run_id, token=args.token) __magic_name__ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __magic_name__ = k.find(""" / """) __magic_name__ = k[index + len(""" / """) :] __magic_name__ = v with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __magic_name__ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __magic_name__ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __magic_name__ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __magic_name__ = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __magic_name__ = reduce_by_error(errors) __magic_name__ = reduce_by_model(errors) __magic_name__ = make_github_table(reduced_by_error) __magic_name__ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa) with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp: fp.write(sa)
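# Usage sketch, following the __main__ block above (these are the function
# names that block calls; the run id and output directory are placeholders).
ci_job_links = get_job_links("123456789", token=None)
ci_errors = get_all_errors("ci_reports", job_links=ci_job_links)
print(make_github_table(reduce_by_error(ci_errors)))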
"""simple docstring""" import collections import os import re from pathlib import Path UpperCAmelCase = "src/transformers" # Matches is_xxx_available() UpperCAmelCase = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} UpperCAmelCase = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available UpperCAmelCase = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo UpperCAmelCase = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: UpperCAmelCase = re.compile(r"^\s*try:") # Catches a line with else: UpperCAmelCase = re.compile(r"^\s*else:") def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]: '''simple docstring''' if _re_test_backend.search(__lowerCAmelCase ) is None: return None lowercase_ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )] backends.sort() return "_and_".join(__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowercase_ = f.readlines() lowercase_ = 0 while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure lowercase_ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: lowercase_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowerCAmelCase ): lowercase_ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0] lowercase_ = re.findall(R"""\[([^\]]+)\]""" , __lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue lowercase_ = _re_import_struct_key_value.search(__lowerCAmelCase ) if single_line_import_search is not None: lowercase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 lowercase_ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowercase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): lowercase_ = lines[line_index] if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None: lowercase_ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(""", """ ) lowercase_ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_between_brackets.search(__lowerCAmelCase ) is not None: lowercase_ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(""", """ ) lowercase_ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_quote_object.search(__lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 lowercase_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowercase_ = [] while ( line_index < len(__lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): lowercase_ = lines[line_index] lowercase_ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 lowercase_ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(__lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowercase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): lowercase_ = lines[line_index] lowercase_ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 lowercase_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' def find_duplicates(__lowerCAmelCase ): return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowercase_ = [] for key in import_dict_objects.keys(): lowercase_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowercase_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowercase_ = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _SCREAMING_SNAKE_CASE () -> List[Any]: '''simple docstring''' lowercase_ = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: lowercase_ = os.path.join(__lowerCAmelCase , """__init__.py""" ) lowercase_ = parse_init(__lowerCAmelCase ) if objects is not None: lowercase_ = analyze_results(*__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: lowercase_ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 0: raise ValueError("""\n\n""".join(__lowerCAmelCase ) ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: '''simple docstring''' lowercase_ = [] for path, directories, files in os.walk(__lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(__lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue lowercase_ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) ) lowercase_ = short_path.replace(os.path.sep , """.""" ) submodules.append(__lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue lowercase_ = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) ) lowercase_ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , 
""".""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(__lowerCAmelCase ) return submodules UpperCAmelCase = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: '''simple docstring''' from transformers.utils import direct_transformers_import lowercase_ = direct_transformers_import(__lowerCAmelCase ) lowercase_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" ) as f: lowercase_ = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __lowerCAmelCase ) ) ) lowercase_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__lowerCAmelCase ) > 0: lowercase_ = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", "WavLMForAudioFrameClassification", "WavLMForCTC", "WavLMForSequenceClassification", "WavLMForXVector", "WavLMModel", "WavLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex's list of
            # adjacent vertices and add source vertex to destination vertex's list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present, add destination vertex to its list,
            # then create a new entry for destination vertex with the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present, add source vertex to its list,
            # then create a new entry for source vertex with the destination vertex
            # as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create a new entry for each, pointing
            # at the other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present, add destination vertex to source
            # vertex's list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present, add destination vertex to its list
            # and create a new entry for destination vertex with no adjacent vertices.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present, create a new entry for source
            # vertex with the destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create a new entry for source vertex
            # pointing at destination vertex, and an empty entry for destination
            # vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
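# Quick usage sketch for the class above (add_edge returns self, so calls chain):
graph = GraphAdjacencyList[int](directed=False)
graph.add_edge(1, 2).add_edge(2, 3)
print(graph)  # {1: [2], 2: [1, 3], 3: [2]}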
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ): """simple docstring""" lowercase : torch.FloatTensor lowercase : Optional[torch.FloatTensor] = None def UpperCAmelCase_ (_lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=0.999 , _lowerCAmelCase : str="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCAmelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCAmelCase : Tuple ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __UpperCamelCase : Dict = [] for i in range(_lowerCAmelCase ): __UpperCamelCase : Union[str, Any] = i / num_diffusion_timesteps __UpperCamelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCAmelCase ) / alpha_bar_fn(_lowerCAmelCase ) , _lowerCAmelCase ) ) return torch.tensor(_lowerCAmelCase , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" @register_to_config def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = "fixed_small_log" , __UpperCamelCase = True , __UpperCamelCase = 1.0 , __UpperCamelCase = "epsilon" , __UpperCamelCase = "squaredcos_cap_v2" , ) -> Tuple: '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) __UpperCamelCase : List[str] = betas_for_alpha_bar(__UpperCamelCase ) __UpperCamelCase : Optional[Any] = 1.0 - self.betas __UpperCamelCase : List[Any] = torch.cumprod(self.alphas , dim=0 ) __UpperCamelCase : Optional[int] = torch.tensor(1.0 ) # standard deviation of the initial noise distribution __UpperCamelCase : List[str] = 1.0 # setable values __UpperCamelCase : List[str] = None __UpperCamelCase : str = torch.from_numpy(np.arange(0 , __UpperCamelCase )[::-1].copy() ) __UpperCamelCase : Union[str, Any] = variance_type def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> torch.FloatTensor: '''simple docstring''' return sample def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase : Any = num_inference_steps __UpperCamelCase : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) __UpperCamelCase : List[str] = (np.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) __UpperCamelCase : Any = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Any: '''simple docstring''' if prev_timestep is None: __UpperCamelCase : Union[str, Any] = t - 1 __UpperCamelCase : Union[str, Any] = self.alphas_cumprod[t] __UpperCamelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __UpperCamelCase : int = 1 - alpha_prod_t __UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __UpperCamelCase : str = self.betas[t] else: __UpperCamelCase : Optional[int] = 1 - 
alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __UpperCamelCase : Union[str, Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: __UpperCamelCase : Any = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": __UpperCamelCase : Dict = torch.log(torch.clamp(__UpperCamelCase , min=1E-20 ) ) __UpperCamelCase : Dict = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler __UpperCamelCase : Tuple = variance.log() __UpperCamelCase : str = beta.log() __UpperCamelCase : Union[str, Any] = (predicted_variance + 1) / 2 __UpperCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase=None , __UpperCamelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: '''simple docstring''' __UpperCamelCase : Dict = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": __UpperCamelCase , __UpperCamelCase : Optional[Any] = torch.split(__UpperCamelCase , sample.shape[1] , dim=1 ) else: __UpperCamelCase : Any = None # 1. compute alphas, betas if prev_timestep is None: __UpperCamelCase : List[str] = t - 1 __UpperCamelCase : Optional[int] = self.alphas_cumprod[t] __UpperCamelCase : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __UpperCamelCase : Tuple = 1 - alpha_prod_t __UpperCamelCase : Tuple = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __UpperCamelCase : Any = self.betas[t] __UpperCamelCase : Any = self.alphas[t] else: __UpperCamelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev __UpperCamelCase : Union[str, Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __UpperCamelCase : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __UpperCamelCase : Any = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: __UpperCamelCase : Optional[int] = torch.clamp( __UpperCamelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __UpperCamelCase : Dict = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t __UpperCamelCase : List[str] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __UpperCamelCase : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __UpperCamelCase : int = 0 if t > 0: __UpperCamelCase : str = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=__UpperCamelCase , device=model_output.device ) __UpperCamelCase : int = self._get_variance( __UpperCamelCase , predicted_variance=__UpperCamelCase , prev_timestep=__UpperCamelCase , ) if self.variance_type == "fixed_small_log": __UpperCamelCase : Any = variance elif self.variance_type == "learned_range": __UpperCamelCase : List[Any] = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) __UpperCamelCase : Tuple = variance * variance_noise __UpperCamelCase : Optional[int] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> torch.FloatTensor: '''simple docstring''' __UpperCamelCase : Union[str, Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) __UpperCamelCase : Any = timesteps.to(original_samples.device ) __UpperCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 __UpperCamelCase : List[str] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): __UpperCamelCase : List[Any] = sqrt_alpha_prod.unsqueeze(-1 ) __UpperCamelCase : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 __UpperCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): __UpperCamelCase : Tuple = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) __UpperCamelCase : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
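# Sketch of driving the scheduler above through a toy reverse-diffusion loop,
# assuming the obfuscated class is diffusers' UnCLIPScheduler (its public
# name). The random tensor stands in for a real UNet prediction, so the
# result is noise, not an image.
import torch
from diffusers import UnCLIPScheduler

unclip_scheduler = UnCLIPScheduler(num_train_timesteps=1000)
unclip_scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)
for t in unclip_scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for the denoiser output
    sample = unclip_scheduler.step(model_output, t, sample).prev_sample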
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger __A =get_logger(__name__) __A =R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n" class UpperCAmelCase__ : '''simple docstring''' @add_start_docstrings(a_ ) def __call__( self : List[Any] , a_ : jnp.ndarray , a_ : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCAmelCase__ : '''simple docstring''' @add_start_docstrings(a_ ) def __call__( self : Any , a_ : jnp.ndarray , a_ : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' @add_start_docstrings(a_ ) def __call__( self : str , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int , **a_ : Tuple ): '''simple docstring''' for processor in self: __UpperCAmelCase : int = inspect.signature(processor.__call__ ).parameters if len(a_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'Make sure that all the required parameters: {list(function_args.keys() )} for ' F'{processor.__class__} are passed to the logits processor.' 
) __UpperCAmelCase : Optional[Any] = processor(a_ , a_ , a_ , **a_ ) else: __UpperCAmelCase : List[str] = processor(a_ , a_ , a_ ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : int , a_ : float ): '''simple docstring''' if not isinstance(a_ , a_ ) or not (temperature > 0): raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' ) __UpperCAmelCase : Dict = temperature def __call__( self : Optional[int] , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = scores / self.temperature return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : List[Any] , a_ : float , a_ : float = -float('''Inf''' ) , a_ : int = 1 ): '''simple docstring''' if not isinstance(a_ , a_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(a_ , a_ ) or (min_tokens_to_keep < 1): raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) __UpperCAmelCase : List[Any] = top_p __UpperCAmelCase : Union[str, Any] = filter_value __UpperCAmelCase : List[str] = min_tokens_to_keep def __call__( self : List[str] , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = lax.top_k(a_ , scores.shape[-1] ) __UpperCAmelCase : List[str] = jnp.full_like(a_ , self.filter_value ) __UpperCAmelCase : Tuple = jax.nn.softmax(a_ , axis=-1 ).cumsum(axis=-1 ) __UpperCAmelCase : List[str] = cumulative_probs < self.top_p # include the token that is higher than top_p as well __UpperCAmelCase : int = jnp.roll(a_ , 1 ) score_mask |= score_mask.at[:, 0].set(a_ ) # min tokens to keep __UpperCAmelCase : int = score_mask.at[:, : self.min_tokens_to_keep].set(a_ ) __UpperCAmelCase : Optional[int] = jnp.where(a_ , a_ , a_ ) __UpperCAmelCase : Union[str, Any] = jax.lax.sort_key_val(a_ , a_ )[-1] return next_scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Dict , a_ : int , a_ : float = -float('''Inf''' ) , a_ : int = 1 ): '''simple docstring''' if not isinstance(a_ , a_ ) or top_k <= 0: raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' ) __UpperCAmelCase : Union[str, Any] = max(a_ , a_ ) __UpperCAmelCase : Any = filter_value def __call__( self : Tuple , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = scores.shape __UpperCAmelCase : Any = jnp.full(batch_size * vocab_size , self.filter_value ) __UpperCAmelCase : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check __UpperCAmelCase , __UpperCAmelCase : Optional[int] = lax.top_k(a_ , a_ ) __UpperCAmelCase : Optional[Any] = jnp.broadcast_to((jnp.arange(a_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() __UpperCAmelCase : str = topk_scores.flatten() __UpperCAmelCase : Optional[Any] = topk_indices.flatten() + shift __UpperCAmelCase : Dict = next_scores_flat.at[topk_indices_flat].set(a_ ) __UpperCAmelCase : List[str] = next_scores_flat.reshape(a_ , a_ ) return next_scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , a_ : int ): '''simple docstring''' __UpperCAmelCase : Dict = bos_token_id def __call__( self : Tuple , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase : 
Optional[Any] = jnp.full(scores.shape , -float('''inf''' ) ) __UpperCAmelCase : List[str] = 1 - jnp.bool_(cur_len - 1 ) __UpperCAmelCase : str = jnp.where(a_ , new_scores.at[:, self.bos_token_id].set(0 ) , a_ ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Any , a_ : int , a_ : int ): '''simple docstring''' __UpperCAmelCase : Dict = max_length __UpperCAmelCase : Optional[int] = eos_token_id def __call__( self : Union[str, Any] , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = jnp.full(scores.shape , -float('''inf''' ) ) __UpperCAmelCase : int = 1 - jnp.bool_(cur_len - self.max_length + 1 ) __UpperCAmelCase : List[Any] = jnp.where(a_ , new_scores.at[:, self.eos_token_id].set(0 ) , a_ ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : str , a_ : int , a_ : int ): '''simple docstring''' if not isinstance(a_ , a_ ) or min_length < 0: raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' ) if not isinstance(a_ , a_ ) or eos_token_id < 0: raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) __UpperCAmelCase : List[Any] = min_length __UpperCAmelCase : Union[str, Any] = eos_token_id def __call__( self : Union[str, Any] , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase : Any = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) __UpperCAmelCase : Union[str, Any] = jnp.where(a_ , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , a_ ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , a_ : Optional[Any] , a_ : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(a_ ) __UpperCAmelCase : Union[str, Any] = begin_index def __call__( self : List[str] , a_ : int , a_ : Union[str, Any] , a_ : int ): '''simple docstring''' __UpperCAmelCase : str = 1 - jnp.bool_(cur_len - self.begin_index ) __UpperCAmelCase : Union[str, Any] = jnp.where(a_ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , a_ ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Tuple , a_ : list ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = list(a_ ) def __call__( self : Any , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , a_ : Dict ): '''simple docstring''' __UpperCAmelCase : int = dict(a_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
__UpperCAmelCase : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: __UpperCAmelCase : Any = force_token_array.at[index].set(a_ ) __UpperCAmelCase : Optional[int] = jnp.intaa(a_ ) def __call__( self : Optional[int] , a_ : jnp.ndarray , a_ : jnp.ndarray , a_ : int ): '''simple docstring''' def _force_token(a_ : Dict ): __UpperCAmelCase : Any = scores.shape[0] __UpperCAmelCase : List[str] = self.force_token_array[generation_idx] __UpperCAmelCase : Union[str, Any] = jnp.ones_like(a_ , dtype=scores.dtype ) * -float('''inf''' ) __UpperCAmelCase : Optional[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) __UpperCAmelCase : int = lax.dynamic_update_slice(a_ , a_ , (0, current_token) ) return new_scores __UpperCAmelCase : str = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(a_ ) , lambda: scores , ) , ) return scores class UpperCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' def __init__( self : Dict , a_ : List[Any] , a_ : str , a_ : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = generate_config.eos_token_id __UpperCAmelCase : int = generate_config.no_timestamps_token_id __UpperCAmelCase : Optional[int] = generate_config.no_timestamps_token_id + 1 __UpperCAmelCase : str = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(a_ , '''max_initial_timestamp_index''' ): __UpperCAmelCase : Dict = generate_config.max_initial_timestamp_index else: __UpperCAmelCase : List[Any] = model_config.vocab_size if self.max_initial_timestamp_index is None: __UpperCAmelCase : Optional[int] = model_config.vocab_size def __call__( self : Optional[Any] , a_ : Dict , a_ : List[Any] , a_ : List[str] ): '''simple docstring''' __UpperCAmelCase : int = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) ) def handle_pairs(a_ : List[str] , a_ : str ): __UpperCAmelCase : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , a_ , a_ ) __UpperCAmelCase : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , a_ , ) __UpperCAmelCase : Any = jnp.where((cur_len - self.begin_index) < 2 , a_ , a_ ) __UpperCAmelCase : Any = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , a_ , a_ , ) return jnp.where( a_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , a_ , ) __UpperCAmelCase : List[Any] = jax.vmap(a_ )(a_ , a_ ) __UpperCAmelCase : List[str] = jnp.where(cur_len == self.begin_index , a_ , a_ ) __UpperCAmelCase : Tuple = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , a_ , ) __UpperCAmelCase : Any = self.timestamp_begin + self.max_initial_timestamp_index __UpperCAmelCase : Optional[int] = jnp.where( a_ , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , a_ , ) # if sum of probability over timestamps is above any other token, sample timestamp __UpperCAmelCase : str = jax.nn.log_softmax(a_ , axis=-1 ) def handle_cumulative_probs(a_ : Any , a_ : Union[str, Any] ): __UpperCAmelCase : Tuple = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) __UpperCAmelCase : Any = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > 
max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , a_ , ) __UpperCAmelCase : Union[str, Any] = jax.vmap(a_ )(a_ , a_ ) return scores
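# A minimal, self-contained sketch (not part of the module above) of how a
# processor such as the temperature warper defined here transforms logits; the
# dummy batch/vocab sizes and the 0.7 temperature are illustrative assumptions.
import jax
import jax.numpy as jnp

def apply_temperature(scores: jnp.ndarray, temperature: float) -> jnp.ndarray:
    # Same operation as the temperature warper's __call__: scale the logits,
    # which sharpens the softmax distribution for temperature < 1.
    return scores / temperature

dummy_scores = jax.random.normal(jax.random.PRNGKey(0), (2, 10))  # (batch, vocab)
sharpened = apply_temperature(dummy_scores, 0.7)
# Lower temperature can only concentrate probability mass on the arg-max token.
assert jnp.all(
    jax.nn.softmax(sharpened, axis=-1).max(axis=-1)
    >= jax.nn.softmax(dummy_scores, axis=-1).max(axis=-1)
)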
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Print a maximum-size set of mutually compatible activities.

    Assumes the activities are already sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
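# A small, hypothetical companion to print_max_activities above: the same
# greedy rule, but returning the chosen indices instead of printing them, which
# makes the behaviour easy to test. Assumes activities are sorted by finish time.
def select_max_activities(start: list[int], finish: list[int]) -> list[int]:
    """
    >>> select_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
    [0, 1, 3, 4]
    """
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected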
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Parse trailing ``--key value`` pairs into a plain kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
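# An illustrative check (not in the original CLI module) of the pairwise
# flag/value convention that parse_unknown_args above relies on:
unknown = ["--name", "squad", "--split", "train"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown[::2], unknown[1::2])}
assert parsed == {"name": "squad", "split": "train"}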
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig a_ : Optional[int] = logging.get_logger(__name__) # General docstring a_ : List[str] = 'RegNetConfig' # Base docstring a_ : Union[str, Any] = 'facebook/regnet-y-040' a_ : Optional[Any] = [1, 1_0_8_8, 7, 7] # Image classification docstring a_ : Dict = 'facebook/regnet-y-040' a_ : List[Any] = 'tabby, tabby cat' a_ : Union[str, Any] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a , __a = 3 , __a = 1 , __a = 1 , __a = "relu" , ): '''simple docstring''' super().__init__() lowerCamelCase = nn.Convad( __a , __a , kernel_size=__a , stride=__a , padding=kernel_size // 2 , groups=__a , bias=__a , ) lowerCamelCase = nn.BatchNormad(__a ) lowerCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def _a (self , __a ): '''simple docstring''' lowerCamelCase = self.convolution(__a ) lowerCamelCase = self.normalization(__a ) lowerCamelCase = self.activation(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a ): '''simple docstring''' super().__init__() lowerCamelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowerCamelCase = config.num_channels def _a (self , __a ): '''simple docstring''' lowerCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) lowerCamelCase = self.embedder(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a , __a = 2 ): '''simple docstring''' super().__init__() lowerCamelCase = nn.Convad(__a , __a , kernel_size=1 , stride=__a , bias=__a ) lowerCamelCase = nn.BatchNormad(__a ) def _a (self , __a ): '''simple docstring''' lowerCamelCase = self.convolution(__a ) lowerCamelCase = self.normalization(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a ): '''simple docstring''' super().__init__() lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) lowerCamelCase = nn.Sequential( nn.Convad(__a , __a , kernel_size=1 ) , nn.ReLU() , nn.Convad(__a , __a , kernel_size=1 ) , nn.Sigmoid() , ) def _a (self , __a ): '''simple docstring''' lowerCamelCase = self.pooler(__a ) lowerCamelCase = self.attention(__a ) lowerCamelCase = hidden_state * attention return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a , __a , __a = 1 ): '''simple docstring''' super().__init__() lowerCamelCase = in_channels != out_channels or stride != 1 lowerCamelCase = max(1 , out_channels // config.groups_width ) lowerCamelCase = ( RegNetShortCut(__a , __a , stride=__a ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase = nn.Sequential( RegNetConvLayer(__a , __a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__a , __a , stride=__a , groups=__a , activation=config.hidden_act ) , RegNetConvLayer(__a , __a , kernel_size=1 , activation=__a ) , ) lowerCamelCase = ACTaFN[config.hidden_act] def _a (self , __a ): '''simple docstring''' lowerCamelCase = hidden_state lowerCamelCase = self.layer(__a ) lowerCamelCase = self.shortcut(__a ) hidden_state += residual lowerCamelCase = self.activation(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a , __a , __a = 1 ): '''simple docstring''' super().__init__() lowerCamelCase = in_channels != out_channels or stride != 1 lowerCamelCase = max(1 , out_channels // config.groups_width ) lowerCamelCase = ( RegNetShortCut(__a , __a , stride=__a ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase = nn.Sequential( RegNetConvLayer(__a , __a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__a , __a , stride=__a , groups=__a , activation=config.hidden_act ) , RegNetSELayer(__a , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__a , __a , kernel_size=1 , activation=__a ) , ) lowerCamelCase = ACTaFN[config.hidden_act] def _a (self , __a ): '''simple docstring''' lowerCamelCase = hidden_state lowerCamelCase = self.layer(__a ) lowerCamelCase = self.shortcut(__a ) hidden_state += residual lowerCamelCase = self.activation(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a , __a , __a , __a = 2 , __a = 2 , ): '''simple docstring''' super().__init__() lowerCamelCase = RegNetXLayer if config.layer_type == "x" else RegNetYLayer lowerCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( __a , __a , __a , stride=__a , ) , *[layer(__a , __a , __a ) for _ in range(depth - 1 )] , ) def _a (self , __a ): '''simple docstring''' lowerCamelCase = self.layers(__a ) return hidden_state class lowerCamelCase__ ( nn.Module): """simple docstring""" def __init__(self , __a ): '''simple docstring''' super().__init__() lowerCamelCase = 
nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( __a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowerCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(__a , config.depths[1:] ): self.stages.append(RegNetStage(__a , __a , __a , depth=__a ) ) def _a (self , __a , __a = False , __a = True ): '''simple docstring''' lowerCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCamelCase = hidden_states + (hidden_state,) lowerCamelCase = stage_module(__a ) if output_hidden_states: lowerCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class lowerCamelCase__ ( UpperCAmelCase_): """simple docstring""" _A = RegNetConfig _A = 'regnet' _A = 'pixel_values' _A = True def _a (self , __a ): '''simple docstring''' if isinstance(__a , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(__a , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _a (self , __a , __a=False ): '''simple docstring''' if isinstance(__a , __a ): lowerCamelCase = value a_ : Dict = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' a_ : Optional[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowerCamelCase__ ( UpperCAmelCase_): """simple docstring""" def __init__(self , __a ): '''simple docstring''' super().__init__(__a ) lowerCamelCase = config lowerCamelCase = RegNetEmbeddings(__a ) lowerCamelCase = RegNetEncoder(__a ) lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _a (self , __a , __a = None , __a = None ): '''simple docstring''' lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase = self.embedder(__a ) lowerCamelCase = self.encoder( __a , output_hidden_states=__a , return_dict=__a ) lowerCamelCase = encoder_outputs[0] lowerCamelCase = self.pooler(__a ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowerCamelCase__ ( UpperCAmelCase_): """simple docstring""" def __init__(self , __a ): '''simple docstring''' super().__init__(__a ) lowerCamelCase = config.num_labels lowerCamelCase = RegNetModel(__a ) # classification head lowerCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _a (self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase = self.regnet(__a , output_hidden_states=__a , return_dict=__a ) lowerCamelCase = outputs.pooler_output if return_dict else outputs[1] lowerCamelCase = self.classifier(__a ) lowerCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase = "single_label_classification" else: lowerCamelCase = "multi_label_classification" if self.config.problem_type == "regression": lowerCamelCase = MSELoss() if self.num_labels == 1: lowerCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCamelCase = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": lowerCamelCase = CrossEntropyLoss() lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase = BCEWithLogitsLoss() lowerCamelCase = 
loss_fct(__a , __a ) if not return_dict: lowerCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
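# A hedged smoke test for the modeling code above, using the public
# transformers names these classes carry upstream (RegNetConfig,
# RegNetForImageClassification). The tiny config values are arbitrary
# assumptions, chosen only to keep the forward pass cheap.
import torch
from transformers import RegNetConfig, RegNetForImageClassification

config = RegNetConfig(
    num_channels=3,
    embedding_size=8,
    hidden_sizes=[8, 16],
    depths=[1, 1],
    groups_width=8,
    num_labels=2,
)
model = RegNetForImageClassification(config).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 32, 32)).logits
assert logits.shape == (1, 2)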
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase ( UpperCAmelCase_ : Features )-> List[Any]: """simple docstring""" a =np.inf def set_batch_size(UpperCAmelCase_ : FeatureType ) -> None: nonlocal batch_size if isinstance(snake_case_ , snake_case_ ): a =min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(snake_case_ , snake_case_ ): a =min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(snake_case_ , snake_case_ ) and feature.dtype == "binary": a =min(snake_case_ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(snake_case_ , snake_case_ ) return None if batch_size is np.inf else batch_size class UpperCAmelCase__ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , **_lowerCAmelCase , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) a =path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} a =_PACKAGED_DATASETS_MODULES["parquet"][1] a =Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def lowerCAmelCase__ ( self ): # Build iterable dataset if self.streaming: a =self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: a =None a =None a =None a =None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) a =self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class UpperCAmelCase__ : '''simple docstring''' def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ): a =dataset a =path_or_buf a =batch_size or get_writer_batch_size(dataset.features ) a =parquet_writer_kwargs def lowerCAmelCase__ ( self ): a =self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , """wb+""" ) as buffer: a =self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: a =self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): a =0 a =parquet_writer_kwargs.pop("""path_or_buf""" , __A ) a =self.dataset.features.arrow_schema a =pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ): a =query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
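# An illustrative round trip (not part of the module above) through the public
# datasets API these classes back: Dataset.to_parquet drives the writer and
# Dataset.from_parquet the reader. The tiny table and file name are arbitrary.
import os
import tempfile

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "tiny.parquet")
    ds.to_parquet(path)
    reloaded = Dataset.from_parquet(path)
    assert reloaded["text"] == ["a", "b"]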
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
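# For context, a stand-alone sketch (illustrative only, not the actual
# _LazyModule implementation) of the lazy-import pattern this init file uses:
# attribute access on the module triggers the real import via the PEP 562
# module-level __getattr__ hook, so heavy submodules load on first use.
import importlib

_lazy_imports = {"dumps": "json", "loads": "json"}  # toy mapping: name -> module

def __getattr__(name):
    if name in _lazy_imports:
        return getattr(importlib.import_module(_lazy_imports[name]), name)
    raise AttributeError(name)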
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() def __magic_name__ (self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) SCREAMING_SNAKE_CASE__ : Any = '''A painting of a squirrel eating a burger''' SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count() SCREAMING_SNAKE_CASE__ : Optional[Any] = num_samples * [prompt] SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.prepare_inputs(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[str] = replicate(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Dict = shard(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Dict = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.random.split(_lowerCamelCase , jax.device_count() ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=25 , jit=_lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) SCREAMING_SNAKE_CASE__ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1] SCREAMING_SNAKE_CASE__ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) SCREAMING_SNAKE_CASE__ : str = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = '''stabilityai/stable-diffusion-2''' SCREAMING_SNAKE_CASE__ : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCamelCase , subfolder="""scheduler""" ) SCREAMING_SNAKE_CASE__ : str = FlaxStableDiffusionPipeline.from_pretrained( _lowerCamelCase , scheduler=_lowerCamelCase , revision="""bf16""" , dtype=jnp.bfloataa , ) SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler_params SCREAMING_SNAKE_CASE__ : List[str] = '''A painting of a squirrel eating a burger''' SCREAMING_SNAKE_CASE__ : Tuple = jax.device_count() SCREAMING_SNAKE_CASE__ : List[str] = num_samples * [prompt] SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.prepare_inputs(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = replicate(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = shard(_lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ : str = jax.random.split(_lowerCamelCase , jax.device_count() ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=25 , jit=_lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) SCREAMING_SNAKE_CASE__ : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) SCREAMING_SNAKE_CASE__ : List[str] = images[0, 2_53:2_56, 2_53:2_56, -1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
jnp.asarray(jax.device_get(image_slice.flatten() ) ) SCREAMING_SNAKE_CASE__ : int = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
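# The replicate/shard calls in the tests above follow the standard pmap data
# layout; a self-contained sketch of that layout on however many devices are
# visible (the parameter and batch shapes here are illustrative only):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.local_device_count()
params = {"w": jnp.ones((3,))}
replicated = replicate(params)           # adds a leading device axis: (n, 3)
batch = jnp.arange(n * 4).reshape(n * 4, 1)
sharded = shard(batch)                   # reshaped to (n, 4, 1), one slice per device
assert replicated["w"].shape == (n, 3)
assert sharded.shape == (n, 4, 1)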
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from the full pool."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build a password of length ``i`` guaranteed to contain ``chars_incl``,
    padding the remainder with letters, digits, and punctuation."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """A password is strong if it is at least ``min_length`` characters long
    and contains UPPERCASE, lowercase, numbers, and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
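# A quick usage sketch for the helpers above; the length 12 is an arbitrary
# example, and the strength label simply applies is_strong_password as defined
# in this file.
pw = password_generator(12)
assert len(pw) == 12
print(pw, "->", "strong" if is_strong_password(pw) else "weak")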
"""simple docstring""" def lowercase ( UpperCamelCase : int ): """simple docstring""" assert ( isinstance(UpperCamelCase , UpperCamelCase ) and number_of_steps > 0 ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 A__ , A__ : str =1, 1 for _ in range(number_of_steps - 1 ): A__ , A__ : Dict =current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : int=3 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : int="divided_space_time" , UpperCamelCase__ : Tuple=None , ): A__ : str =parent A__ : str =batch_size A__ : Any =image_size A__ : Union[str, Any] =num_channels A__ : str =patch_size A__ : Union[str, Any] =num_frames A__ : Any =is_training A__ : Optional[int] =use_labels A__ : Optional[int] =hidden_size A__ : Union[str, Any] =num_hidden_layers A__ : List[str] =num_attention_heads A__ : Tuple =intermediate_size A__ : List[Any] =hidden_act A__ : str =hidden_dropout_prob A__ : Optional[Any] =attention_probs_dropout_prob A__ : Dict =attention_type A__ : str =initializer_range A__ : str =scope A__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token A__ : Optional[Any] =(image_size // patch_size) ** 2 A__ : List[Any] =(num_frames) * self.num_patches_per_frame + 1 def _UpperCAmelCase ( self : str ): A__ : Dict =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) A__ : List[Any] =None if self.use_labels: A__ : List[str] =ids_tensor([self.batch_size] , self.num_labels ) A__ : List[Any] =self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self : Tuple ): A__ : Tuple =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) A__ : Tuple =self.num_labels return config def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ): A__ : 
Union[str, Any] =TimesformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : Union[str, Any] =model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ): A__ : Union[str, Any] =TimesformerForVideoClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A__ : int =model(UpperCamelCase__ ) # verify the logits shape A__ : Optional[int] =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[int] ): A__ : int =self.prepare_config_and_inputs() A__ , A__ , A__ : Tuple =config_and_inputs A__ : int ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): '''simple docstring''' __magic_name__ : Any = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __magic_name__ : Optional[Any] = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) __magic_name__ : int = False __magic_name__ : Optional[Any] = False __magic_name__ : int = False __magic_name__ : Tuple = False def _UpperCAmelCase ( self : List[str] ): A__ : Optional[Any] =TimesformerModelTester(self ) A__ : int =ConfigTester( self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def _UpperCAmelCase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=False ): A__ : str =copy.deepcopy(UpperCamelCase__ ) if return_labels: if model_class in get_values(UpperCamelCase__ ): A__ : List[str] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) return inputs_dict def _UpperCAmelCase ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def _UpperCAmelCase ( self : Union[str, Any] ): pass def _UpperCAmelCase ( self : Tuple ): A__ , A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Dict =model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _UpperCAmelCase ( self : Union[str, Any] ): A__ , A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Optional[int] =model_class(UpperCamelCase__ ) A__ : Dict =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Any =[*signature.parameters.keys()] A__ : Union[str, Any] =["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _UpperCAmelCase ( self : List[Any] ): A__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*UpperCamelCase__ ) @slow def _UpperCAmelCase ( self : Any ): for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : 
Dict =TimesformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def _UpperCAmelCase ( self : Dict ): if not self.has_attentions: pass else: A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common() A__ : Optional[Any] =True for model_class in self.all_model_classes: A__ : Tuple =self.model_tester.seq_length A__ : Optional[int] =self.model_tester.num_frames A__ : List[Any] =True A__ : Optional[Any] =False A__ : List[Any] =True A__ : Optional[Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Tuple =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ : Any =True A__ : int =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : str =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) A__ : int =len(UpperCamelCase__ ) # Check attention is always last and order is fine A__ : List[Any] =True A__ : Optional[Any] =True A__ : Optional[Any] =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) ) A__ : Optional[int] =outputs.attentions self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _UpperCAmelCase ( self : Any ): def check_hidden_states_output(UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ): A__ : Any =model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): A__ : int =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) A__ : Optional[Any] =outputs.hidden_states A__ : Optional[int] =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) A__ : List[Any] =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Any =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : Optional[int] =True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase ( ): """simple docstring""" A__ : Any =hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) A__ : Union[str, 
Any] =np.load(UpperCamelCase ) return list(UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @cached_property def _UpperCAmelCase ( self : List[Any] ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self : List[Any] ): A__ : Any =TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( UpperCamelCase__ ) A__ : Dict =self.default_image_processor A__ : Tuple =prepare_video() A__ : Dict =image_processor(video[:8] , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): A__ : Optional[int] =model(**UpperCamelCase__ ) # verify the logits A__ : Optional[Any] =torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) A__ : Dict =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
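# For reference, a self-contained sketch (frame count and size are illustrative
# assumptions) of the video input layout the tests above rely on: a video is a
# list of (H, W, C) uint8 frames, which the image processor then stacks into a
# (batch, num_frames, channels, height, width) tensor.
import numpy as np

video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
assert len(video) == 8 and video[0].shape == (224, 224, 3)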
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class lowercase ( a ): lowercase__ : torch.FloatTensor class lowercase ( a , a ): @register_to_config def __init__( self : Union[str, Any] , _UpperCamelCase : int = 32 , _UpperCamelCase : int = 64 , _UpperCamelCase : int = 20 , _UpperCamelCase : int = 768 , _UpperCamelCase : Optional[int]=77 , _UpperCamelCase : Dict=4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : str = "silu" , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "linear" , _UpperCamelCase : Optional[str] = "prd" , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , ) -> Any: '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = attention_head_dim SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE = additional_embeddings SCREAMING_SNAKE_CASE = time_embed_dim or inner_dim SCREAMING_SNAKE_CASE = embedding_proj_dim or embedding_dim SCREAMING_SNAKE_CASE = clip_embed_dim or embedding_dim SCREAMING_SNAKE_CASE = Timesteps(_UpperCamelCase , _UpperCamelCase , 0 ) SCREAMING_SNAKE_CASE = TimestepEmbedding(_UpperCamelCase , _UpperCamelCase , out_dim=_UpperCamelCase , act_fn=_UpperCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase ) if embedding_proj_norm_type is None: SCREAMING_SNAKE_CASE = None elif embedding_proj_norm_type == "layer": SCREAMING_SNAKE_CASE = nn.LayerNorm(_UpperCamelCase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase ) if encoder_hid_proj_type is None: SCREAMING_SNAKE_CASE = None elif encoder_hid_proj_type == "linear": SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _UpperCamelCase ) ) if added_emb_type == "prd": SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , 1 , _UpperCamelCase ) ) elif added_emb_type is None: SCREAMING_SNAKE_CASE = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) SCREAMING_SNAKE_CASE = nn.ModuleList( [ BasicTransformerBlock( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , dropout=_UpperCamelCase , activation_fn="gelu" , attention_bias=_UpperCamelCase , ) for d in range(_UpperCamelCase ) ] ) if norm_in_type == "layer": SCREAMING_SNAKE_CASE = nn.LayerNorm(_UpperCamelCase ) elif norm_in_type is None: SCREAMING_SNAKE_CASE = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." 
) SCREAMING_SNAKE_CASE = nn.LayerNorm(_UpperCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 ) causal_attention_mask.triu_(1 ) SCREAMING_SNAKE_CASE = causal_attention_mask[None, ...] self.register_buffer("causal_attention_mask" , _UpperCamelCase , persistent=_UpperCamelCase ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , _UpperCamelCase ) ) SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , _UpperCamelCase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __snake_case( self : int ) -> Dict[str, AttentionProcessor]: '''simple docstring''' SCREAMING_SNAKE_CASE = {} def fn_recursive_add_processors(_UpperCamelCase : str , _UpperCamelCase : torch.nn.Module , _UpperCamelCase : Dict[str, AttentionProcessor] ): if hasattr(_UpperCamelCase , "set_processor" ): SCREAMING_SNAKE_CASE = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __snake_case( self : Any , _UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(_UpperCamelCase : str , _UpperCamelCase : torch.nn.Module , _UpperCamelCase : int ): if hasattr(_UpperCamelCase , "set_processor" ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __snake_case( self : List[str] ) -> Tuple: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def __snake_case( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[torch.Tensor, float, int] , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.BoolTensor] = None , _UpperCamelCase : bool = True , ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = hidden_states.shape[0] SCREAMING_SNAKE_CASE = timestep if not torch.is_tensor(_UpperCamelCase ): SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(_UpperCamelCase ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE = timesteps * torch.ones(_UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device ) SCREAMING_SNAKE_CASE = self.time_proj(_UpperCamelCase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. SCREAMING_SNAKE_CASE = timesteps_projected.to(dtype=self.dtype ) SCREAMING_SNAKE_CASE = self.time_embedding(_UpperCamelCase ) if self.embedding_proj_norm is not None: SCREAMING_SNAKE_CASE = self.embedding_proj_norm(_UpperCamelCase ) SCREAMING_SNAKE_CASE = self.embedding_proj(_UpperCamelCase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(_UpperCamelCase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" ) SCREAMING_SNAKE_CASE = self.proj_in(_UpperCamelCase ) SCREAMING_SNAKE_CASE = self.positional_embedding.to(hidden_states.dtype ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 if encoder_hidden_states is not None: additional_embeds.append(_UpperCamelCase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: SCREAMING_SNAKE_CASE = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: SCREAMING_SNAKE_CASE = hidden_states[:, None, :] SCREAMING_SNAKE_CASE = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: SCREAMING_SNAKE_CASE = self.prd_embedding.to(hidden_states.dtype ).expand(_UpperCamelCase , -1 , -1 ) additional_embeds.append(_UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.cat( _UpperCamelCase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens SCREAMING_SNAKE_CASE = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: SCREAMING_SNAKE_CASE = F.pad( 
_UpperCamelCase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) SCREAMING_SNAKE_CASE = hidden_states + positional_embeddings if attention_mask is not None: SCREAMING_SNAKE_CASE = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0 SCREAMING_SNAKE_CASE = F.pad(_UpperCamelCase , (0, self.additional_embeddings) , value=0.0 ) SCREAMING_SNAKE_CASE = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) SCREAMING_SNAKE_CASE = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: SCREAMING_SNAKE_CASE = self.norm_in(_UpperCamelCase ) for block in self.transformer_blocks: SCREAMING_SNAKE_CASE = block(_UpperCamelCase , attention_mask=_UpperCamelCase ) SCREAMING_SNAKE_CASE = self.norm_out(_UpperCamelCase ) if self.prd_embedding is not None: SCREAMING_SNAKE_CASE = hidden_states[:, -1] else: SCREAMING_SNAKE_CASE = hidden_states[:, additional_embeddings_len:] SCREAMING_SNAKE_CASE = self.proj_to_clip_embeddings(_UpperCamelCase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=_UpperCamelCase ) def __snake_case( self : List[str] , _UpperCamelCase : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
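# A tiny numerical check (values illustrative) of the post-processing step
# above: multiplying by the CLIP latent std and adding the mean inverts the
# (x - mean) / std normalization applied during training.
import torch

mean, std = torch.tensor(0.25), torch.tensor(1.5)
x = torch.randn(4)
normalized = (x - mean) / std
assert torch.allclose(normalized * std + mean, x, atol=1e-6)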
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCamelCase : Optional[int] = { '''vocab_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt''' ), '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''', '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''squeezebert/squeezebert-uncased''': ( '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json''' ), '''squeezebert/squeezebert-mnli-headless''': ( '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json''' ), }, } _lowerCamelCase : List[str] = { '''squeezebert/squeezebert-uncased''': 5_12, '''squeezebert/squeezebert-mnli''': 5_12, '''squeezebert/squeezebert-mnli-headless''': 5_12, } _lowerCamelCase : int = { '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True}, '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True}, } class lowercase ( a ): lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : str = SqueezeBertTokenizer def __init__( self : Dict , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : List[Any]="[UNK]" , _UpperCamelCase : List[Any]="[SEP]" , _UpperCamelCase : Tuple="[PAD]" , _UpperCamelCase : int="[CLS]" , _UpperCamelCase : Tuple="[MASK]" , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[Any]=None , **_UpperCamelCase : Any , ) -> Optional[Any]: '''simple docstring''' super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , _UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , _UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , _UpperCamelCase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , normalizer_state.pop("type" ) ) SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = strip_accents SCREAMING_SNAKE_CASE = tokenize_chinese_chars SCREAMING_SNAKE_CASE = normalizer_class(**_UpperCamelCase ) SCREAMING_SNAKE_CASE = do_lower_case def __snake_case( self : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]=None ) -> int: '''simple docstring''' 
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __snake_case( self : Union[str, Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
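# An illustrative check of the special-token layout built above, using the
# public checkpoint name that appears in this file's vocab map (needs network
# access on first run):
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
ids = tok.build_inputs_with_special_tokens([10, 11], [20])
# Expected layout: [CLS] 10 11 [SEP] 20 [SEP]
assert ids[0] == tok.cls_token_id and ids.count(tok.sep_token_id) == 2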
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _UpperCamelCase ( a__ ): """simple docstring""" snake_case_ = ['vqvae'] def __init__( self : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Tuple , ) -> str: '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A , mel=_A , vqvae=_A ) def _UpperCAmelCase ( self : Optional[int] ) -> Any: '''simple docstring''' return 50 if isinstance(self.scheduler , _A ) else 1000 @torch.no_grad() def __call__( self : Tuple , snake_case : Optional[int] = 1 , snake_case : Union[str, Any] = None , snake_case : Dict = None , snake_case : Tuple = 0 , snake_case : Any = 0 , snake_case : List[str] = None , snake_case : str = None , snake_case : List[str] = 0 , snake_case : Dict = 0 , snake_case : int = None , snake_case : Optional[int] = 0 , snake_case : int = None , snake_case : int = None , snake_case : Optional[Any]=True , ) -> Any: '''simple docstring''' __magic_name__ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_A ) __magic_name__ : Optional[Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __magic_name__ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __magic_name__ : Optional[Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_A , device=self.device , ) __magic_name__ : Dict = noise __magic_name__ : Optional[Any] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_A , _A ) __magic_name__ : Union[str, Any] = self.mel.audio_slice_to_image(_A ) __magic_name__ : int = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) __magic_name__ : int = (input_image / 255) * 2 - 1 __magic_name__ : str = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __magic_name__ : List[Any] = self.vqvae.encode(torch.unsqueeze(_A , 0 ) ).latent_dist.sample( generator=_A )[0] __magic_name__ : Tuple = self.vqvae.config.scaling_factor * input_images if start_step > 0: __magic_name__ : List[Any] = self.scheduler.add_noise(_A , _A , self.scheduler.timesteps[start_step - 1] ) __magic_name__ : Optional[Any] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __magic_name__ : Optional[Any] = int(mask_start_secs * pixels_per_second ) __magic_name__ : Optional[int] = int(mask_end_secs * pixels_per_second ) __magic_name__ : int = self.scheduler.add_noise(_A , _A , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _A ): __magic_name__ : str = self.unet(_A , _A , _A )['sample'] else: __magic_name__ : Any = self.unet(_A , _A )['sample'] if isinstance(self.scheduler , _A ): __magic_name__ : Union[str, Any] = self.scheduler.step( model_output=_A , timestep=_A , sample=_A , eta=_A , generator=_A , )['prev_sample'] 
else: __magic_name__ : Any = self.scheduler.step( model_output=_A , timestep=_A , sample=_A , generator=_A , )['prev_sample'] if mask is not None: if mask_start > 0: __magic_name__ : Any = mask[:, step, :, :mask_start] if mask_end > 0: __magic_name__ : Optional[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __magic_name__ : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images __magic_name__ : Any = self.vqvae.decode(_A )['sample'] __magic_name__ : Any = (images / 2 + 0.5).clamp(0 , 1 ) __magic_name__ : Tuple = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __magic_name__ : Any = (images * 255).round().astype('''uint8''' ) __magic_name__ : Any = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_A , mode='''RGB''' ).convert('''L''' ) for _ in images) ) __magic_name__ : Dict = [self.mel.image_to_audio(_A ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) , **ImagePipelineOutput(_A ) ) @torch.no_grad() def _UpperCAmelCase ( self : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple = 50 ) -> Tuple: '''simple docstring''' assert isinstance(self.scheduler , _A ) self.scheduler.set_timesteps(_A ) __magic_name__ : Dict = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) __magic_name__ : Dict = (sample / 255) * 2 - 1 __magic_name__ : List[str] = torch.Tensor(_A ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __magic_name__ : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __magic_name__ : Optional[int] = self.scheduler.alphas_cumprod[t] __magic_name__ : Dict = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __magic_name__ : Union[str, Any] = 1 - alpha_prod_t __magic_name__ : Union[str, Any] = self.unet(_A , _A )['sample'] __magic_name__ : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output __magic_name__ : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __magic_name__ : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _UpperCAmelCase ( snake_case : Any , snake_case : Optional[Any] , snake_case : Dict ) -> Optional[int]: '''simple docstring''' __magic_name__ : int = acos(torch.dot(torch.flatten(_A ) , torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) ) return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
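The audio-diffusion pipeline above ends with a spherical linear interpolation (slerp) helper for blending two noise tensors. A minimal standalone sketch of the same formula, assuming two torch tensors of equal shape that are not (anti-)parallel; the names x0/x1 are mine, not from the snippet:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two tensors, treated as flat vectors
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    # weight each endpoint so the interpolant moves along the great circle between them
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)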
716
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case_ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) snake_case_ = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) snake_case_ = False snake_case_ = False def _UpperCAmelCase ( self : Dict , snake_case : List[Any] , snake_case : Dict , snake_case : int=False ) -> Tuple: '''simple docstring''' __magic_name__ : Optional[int] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class in get_values(snake_case ): __magic_name__ : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _UpperCamelCase ( lowerCamelCase__ ): """simple docstring""" def __init__( self : Tuple , snake_case : Tuple , snake_case : int=13 , snake_case : Any=7 , snake_case : str=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Any=True , snake_case : List[Any]=99 , snake_case : Any=32 , snake_case : List[str]=32 , snake_case : Union[str, Any]=2 , snake_case : Union[str, Any]=4 , snake_case : List[Any]=37 , snake_case : Tuple="gelu" , snake_case : str=0.1 , snake_case : Dict=0.1 , snake_case : List[Any]=512 , snake_case : Dict=16 , snake_case : int=2 , snake_case : Union[str, Any]=0.02 , snake_case : Optional[Any]=3 , snake_case : int=4 , snake_case : Dict=None , ) -> int: '''simple docstring''' __magic_name__ : Dict = parent __magic_name__ : Dict = batch_size __magic_name__ : Dict = seq_length __magic_name__ : Optional[Any] = is_training __magic_name__ : Union[str, Any] = use_input_mask __magic_name__ : Optional[Any] = use_token_type_ids __magic_name__ : Optional[Any] = use_labels __magic_name__ : Union[str, Any] = vocab_size __magic_name__ : Dict = hidden_size __magic_name__ : List[str] = num_hidden_layers __magic_name__ : Union[str, Any] = num_attention_heads __magic_name__ : Optional[int] = intermediate_size __magic_name__ : Dict = hidden_act __magic_name__ : List[Any] = hidden_dropout_prob __magic_name__ : Optional[int] = attention_probs_dropout_prob __magic_name__ : str = max_position_embeddings __magic_name__ : Union[str, Any] = type_vocab_size __magic_name__ : 
List[str] = type_sequence_label_size __magic_name__ : int = initializer_range __magic_name__ : int = num_labels __magic_name__ : Union[str, Any] = num_choices __magic_name__ : List[Any] = scope __magic_name__ : str = embedding_size def _UpperCAmelCase ( self : List[Any] ) -> List[str]: '''simple docstring''' __magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ : Optional[Any] = None if self.use_input_mask: __magic_name__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ : Any = None if self.use_token_type_ids: __magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ : str = None __magic_name__ : Tuple = None __magic_name__ : List[str] = None if self.use_labels: __magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ : Union[str, Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self : Dict , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] ) -> str: '''simple docstring''' __magic_name__ : Optional[Any] = TFMobileBertModel(config=snake_case ) __magic_name__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : Dict = model(snake_case ) __magic_name__ : str = [input_ids, input_mask] __magic_name__ : List[str] = model(snake_case ) __magic_name__ : Tuple = model(snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self : Optional[Any] , snake_case : int , snake_case : int , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : List[str] ) -> str: '''simple docstring''' __magic_name__ : Dict = TFMobileBertForMaskedLM(config=snake_case ) __magic_name__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : Tuple = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : str , snake_case : Tuple , snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' __magic_name__ : Optional[int] = TFMobileBertForNextSentencePrediction(config=snake_case ) __magic_name__ : Union[str, Any] = {'''input_ids''': input_ids, 
'''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : str = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _UpperCAmelCase ( self : Any , snake_case : str , snake_case : List[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[Any] ) -> str: '''simple docstring''' __magic_name__ : Dict = TFMobileBertForPreTraining(config=snake_case ) __magic_name__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : Optional[int] = model(snake_case ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _UpperCAmelCase ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' __magic_name__ : Union[str, Any] = self.num_labels __magic_name__ : List[Any] = TFMobileBertForSequenceClassification(config=snake_case ) __magic_name__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : Dict = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Dict , snake_case : Any ) -> int: '''simple docstring''' __magic_name__ : Tuple = self.num_choices __magic_name__ : Dict = TFMobileBertForMultipleChoice(config=snake_case ) __magic_name__ : Optional[int] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) __magic_name__ : str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __magic_name__ : str = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Any , snake_case : Dict , snake_case : str ) -> List[Any]: '''simple docstring''' __magic_name__ : Tuple = self.num_labels __magic_name__ : int = TFMobileBertForTokenClassification(config=snake_case ) __magic_name__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : List[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self : int , snake_case : Tuple , snake_case : List[Any] , snake_case : Tuple , snake_case : str , snake_case : Optional[int] , snake_case : Tuple , snake_case : List[str] ) -> List[Any]: '''simple docstring''' __magic_name__ : int = TFMobileBertForQuestionAnswering(config=snake_case ) __magic_name__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __magic_name__ : Any = model(snake_case ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : str = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) : int = config_and_inputs __magic_name__ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def _UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : Optional[int] = TFMobileBertModelTest.TFMobileBertModelTester(self ) __magic_name__ : Tuple = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def _UpperCAmelCase ( self : Any ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case ) def _UpperCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case ) def _UpperCAmelCase ( self : Any ) -> List[str]: '''simple docstring''' __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case ) def _UpperCAmelCase ( self : Optional[int] ) -> List[str]: '''simple docstring''' __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case ) def _UpperCAmelCase ( self : List[str] ) -> List[str]: '''simple docstring''' __magic_name__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case ) def _UpperCAmelCase ( self : Any ) -> Any: '''simple docstring''' __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case ) def _UpperCAmelCase ( self : List[Any] ) -> int: '''simple docstring''' __magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case ) def _UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case ) @slow def _UpperCAmelCase ( self : Dict ) -> Tuple: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: __magic_name__ : str = TFMobileBertModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def _UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : Dict = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) __magic_name__ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __magic_name__ : List[str] = model(snake_case )[0] __magic_name__ : Tuple = [1, 6, 3_0522] self.assertEqual(output.shape 
, snake_case ) __magic_name__ : Union[str, Any] = tf.constant( [ [ [-4.591_9547, -9.24_8295, -9.64_5256], [-6.730_6175, -6.44_0284, -6.605_2837], [-7.274_3506, -6.784_7915, -6.02_4673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
147
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __magic_name__ : Any = logging.get_logger(__name__) __magic_name__ : Tuple = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : List[str] = """resnet""" __lowerCAmelCase : List[Any] = ["""basic""", """bottleneck"""] def __init__( self , _A=3 , _A=6_4 , _A=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _A=[3, 4, 6, 3] , _A="bottleneck" , _A="relu" , _A=False , _A=None , _A=None , **_A , ): '''simple docstring''' super().__init__(**_A ) if layer_type not in self.layer_types: raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) UpperCamelCase : List[Any] = num_channels UpperCamelCase : str = embedding_size UpperCamelCase : List[Any] = hidden_sizes UpperCamelCase : str = depths UpperCamelCase : Tuple = layer_type UpperCamelCase : Tuple = hidden_act UpperCamelCase : str = downsample_in_first_stage UpperCamelCase : str = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(_A ) + 1 )] UpperCamelCase , UpperCamelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=_A , out_indices=_A , stage_names=self.stage_names ) class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Any = version.parse("""1.11""" ) @property def _a ( self ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a ( self ): '''simple docstring''' return 1e-3
102
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(_A) class lowerCamelCase__ ( _A): """simple docstring""" def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]: super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int: _A = {} _A = {} if prompt is not None: _A = prompt if generate_kwargs is not None: _A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) _A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: return super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int: _A = load_image(__lowerCAmelCase ) if prompt is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. 
''' '''Note also that one single text can be provided for conditional image to text generation.''' ) _A = self.model.config.model_type if model_type == "git": _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids _A = [self.tokenizer.cls_token_id] + input_ids _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) model_inputs.update(__lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _A = None return model_inputs def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): _A = None if generate_kwargs is None: _A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _A = model_inputs.pop(self.model.main_input_name ) _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase ) return model_outputs def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]: _A = [] for output_ids in model_outputs: _A = { '''generated_text''': self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , ) } records.append(__lowerCAmelCase ) return records
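A minimal sketch of driving this image-to-text pipeline through the high-level factory; the checkpoint name is just a common example, not taken from the snippet:

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
preds = captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
print(preds)  # e.g. [{'generated_text': 'two birds standing next to each other'}]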
2
0
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among the given environment variables."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates, `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
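A small usage sketch for the helpers above; the environment key names are illustrative:

import os

os.environ["MY_FLAG"] = "true"
print(parse_flag_from_env("MY_FLAG"))                       # True
print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # 1 unless one of those keys is set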
45
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the `num_runs` most recent scheduled runs of the daily CI workflow on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent completed run of the daily CI workflow."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts produced by the most recent completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the imported helper's parameter name
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Fetch the artifacts, then read every file inside each downloaded zip into a dict."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
45
1
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards; negatives never qualify."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
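A few sanity checks for `is_palindrome` above:

assert is_palindrome(121) is True
assert is_palindrome(123) is False
assert is_palindrome(-121) is False  # negatives are rejected up front
assert is_palindrome(0) is True      # the loop never runs, so 0 == 0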
191
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
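A sketch of what the `_LazyModule` registration above buys (my example, not from the file): importing the package is cheap, and the heavy submodules load only on attribute access.

import transformers.models.blip as blip  # no torch/TF modeling code is imported yet

processor_cls = blip.BlipProcessor       # this attribute access triggers the real submodule import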
191
1
"""simple docstring""" from math import ceil def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_1 ): '''simple docstring''' lowerCAmelCase : int = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowerCAmelCase : Optional[int] = 2 * i + 1 lowerCAmelCase : Optional[int] = 2 * i lowerCAmelCase : str = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: lowerCAmelCase__ = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
707
"""simple docstring""" from typing import Any class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , snake_case__ ): """simple docstring""" lowerCAmelCase : Dict = data lowerCAmelCase : Any = None def __repr__( self ): """simple docstring""" return f"""Node({self.data})""" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self ): """simple docstring""" lowerCAmelCase : Tuple = None def __iter__( self ): """simple docstring""" lowerCAmelCase : Any = self.head while node: yield node.data lowerCAmelCase : Optional[int] = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(snake_case__ ) for item in self] ) def __getitem__( self , snake_case__ ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , snake_case__ , snake_case__ ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) lowerCAmelCase : Union[str, Any] = self.head for _ in range(snake_case__ ): lowerCAmelCase : int = current.next lowerCAmelCase : List[str] = data def lowercase__ ( self , snake_case__ ): """simple docstring""" self.insert_nth(len(self ) , snake_case__ ) def lowercase__ ( self , snake_case__ ): """simple docstring""" self.insert_nth(0 , snake_case__ ) def lowercase__ ( self , snake_case__ , snake_case__ ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) lowerCAmelCase : Optional[int] = Node(snake_case__ ) if self.head is None: lowerCAmelCase : Any = new_node elif index == 0: lowerCAmelCase : Any = self.head # link new_node to head lowerCAmelCase : Union[str, Any] = new_node else: lowerCAmelCase : List[str] = self.head for _ in range(index - 1 ): lowerCAmelCase : int = temp.next lowerCAmelCase : int = temp.next lowerCAmelCase : Dict = new_node def lowercase__ ( self ): # print every node data """simple docstring""" print(self ) def lowercase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def lowercase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def lowercase__ ( self , snake_case__ = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) lowerCAmelCase : List[Any] = self.head # default first node if index == 0: lowerCAmelCase : Optional[int] = self.head.next else: lowerCAmelCase : List[str] = self.head for _ in range(index - 1 ): lowerCAmelCase : Union[str, Any] = temp.next lowerCAmelCase : Optional[Any] = temp.next lowerCAmelCase : Any = temp.next.next return delete_node.data def lowercase__ ( self ): """simple docstring""" return self.head is None def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = None lowerCAmelCase : Optional[int] = self.head while current: # Store the current node's next node. 
lowerCAmelCase : List[Any] = current.next # Make the current node's next point backwards lowerCAmelCase : Dict = prev # Make the previous node be the current node lowerCAmelCase : List[str] = current # Make the current node the next node (to progress iteration) lowerCAmelCase : int = next_node # Return prev in order to put the head at the end lowerCAmelCase : Tuple = prev def a__ ( ): '''simple docstring''' lowerCAmelCase : Tuple = LinkedList() assert linked_list.is_empty() is True assert str(SCREAMING_SNAKE_CASE ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(1_0 ): assert len(SCREAMING_SNAKE_CASE ) == i linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 ) assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(1_1 ) assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 1_0 assert linked_list.delete_tail() == 1_1 assert len(SCREAMING_SNAKE_CASE ) == 9 assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase : Optional[Any] = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) ) def a__ ( ): '''simple docstring''' lowerCAmelCase : List[str] = [ -9, 1_0_0, Node(7_7_3_4_5_1_1_2 ), "dlrow olleH", 7, 5_5_5_5, 0, -192.55_555, "Hello, world!", 77.9, Node(1_0 ), None, None, 12.20, ] lowerCAmelCase : List[str] = LinkedList() for i in test_input: linked_list.insert_tail(SCREAMING_SNAKE_CASE ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase : str = linked_list.delete_head() assert result == -9 assert ( str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase : Union[str, Any] = linked_list.delete_tail() assert result == 12.2 assert ( str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 ) assert result is None assert ( str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" 
) ) assert ( str(SCREAMING_SNAKE_CASE ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(SCREAMING_SNAKE_CASE ) assert ( str(SCREAMING_SNAKE_CASE ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(SCREAMING_SNAKE_CASE ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def a__ ( ): '''simple docstring''' from doctest import testmod testmod() lowerCAmelCase : Optional[Any] = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(SCREAMING_SNAKE_CASE ) print("\nReading/changing Node data using indexing:" ) print(f"""Element at Position 1: {linked_list[1]}""" ) lowerCAmelCase : Any = input("Enter New Value: " ).strip() print("New list:" ) print(SCREAMING_SNAKE_CASE ) print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" ) if __name__ == "__main__": main()
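Because the list class above implements `__iter__`, `__len__`, and `__getitem__`, it composes with ordinary Python idioms; a short sketch of the intended interface:

lst = LinkedList()
for value in (3, 1, 4, 1, 5):
    lst.insert_tail(value)

print(len(lst))   # 5
print(list(lst))  # [3, 1, 4, 1, 5]
print(lst[2])     # 4 -- __getitem__ walks the iterator, which yields node data
lst.reverse()
print(str(lst))   # 5->1->4->1->3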
681
0
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count divisors of `n` from its prime factorisation: multiply (multiplicity + 1) per prime."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
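Two spot checks of the divisor-counting identity used above (if n = p1^a1 * ... * pk^ak, the divisor count is (a1+1)*...*(ak+1)):

assert count_divisors(28) == 6                           # 1, 2, 4, 7, 14, 28
assert count_divisors(2**3 * 3**2) == (3 + 1) * (2 + 1)  # 72 has 12 divisors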
598
import operator as op


def solve(post_fix):
    """Evaluate a postfix expression (a list of tokens), printing each stack operation."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731  integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
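Run non-interactively, the evaluator above traces every push/pop; a short example (the expression is mine):

tokens = "5 6 9 * +".split(" ")  # postfix for 5 + 6 * 9
print(solve(tokens))             # prints the step table, then 59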
598
1
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch UpperCAmelCase_ : Any = logging.get_logger(__name__) @add_end_docstrings( A , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , ) class UpperCAmelCase__ ( A ): def lowerCamelCase_ ( self : Dict,__A : GenericTensor ): if self.framework == "tf": _lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _lowerCamelCase : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A ) else: raise ValueError("Unsupported framework" ) return masked_index def lowerCamelCase_ ( self : Dict,__A : GenericTensor ): _lowerCamelCase : List[Any] = self.get_masked_index(__A ) _lowerCamelCase : List[Any] = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask",self.model.base_model_prefix,f'No mask_token ({self.tokenizer.mask_token}) found on the input',) def lowerCamelCase_ ( self : Optional[Any],__A : GenericTensor ): if isinstance(__A,__A ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__A ) def lowerCamelCase_ ( self : List[str],__A : Tuple,__A : List[Any]=None,**__A : str ): if return_tensors is None: _lowerCamelCase : Optional[int] = self.framework _lowerCamelCase : List[Any] = self.tokenizer(__A,return_tensors=__A ) self.ensure_exactly_one_mask_token(__A ) return model_inputs def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any] ): _lowerCamelCase : Any = self.model(**__A ) _lowerCamelCase : Optional[Any] = model_inputs["input_ids"] return model_outputs def lowerCamelCase_ ( self : List[str],__A : int,__A : Optional[Any]=5,__A : int=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _lowerCamelCase : str = target_ids.shape[0] _lowerCamelCase : int = model_outputs["input_ids"][0] _lowerCamelCase : int = model_outputs["logits"] if self.framework == "tf": _lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _lowerCamelCase : List[Any] = outputs.numpy() _lowerCamelCase : List[str] = outputs[0, masked_index, :] _lowerCamelCase : Dict = stable_softmax(__A,axis=-1 ) if target_ids is not None: _lowerCamelCase : Dict = tf.gather_nd(tf.squeeze(__A,0 ),target_ids.reshape(-1,1 ) ) _lowerCamelCase : int = tf.expand_dims(__A,0 ) _lowerCamelCase : Tuple = tf.math.top_k(__A,k=__A ) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy() else: _lowerCamelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _lowerCamelCase : List[str] = outputs[0, masked_index, :] _lowerCamelCase : Optional[Any] = logits.softmax(dim=-1 ) if target_ids is not None: 
_lowerCamelCase : List[Any] = probs[..., target_ids] _lowerCamelCase , _lowerCamelCase : Optional[int] = probs.topk(__A ) _lowerCamelCase : int = [] _lowerCamelCase : Tuple = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist(),predictions.tolist() ) ): _lowerCamelCase : List[Any] = [] for v, p in zip(_values,_predictions ): # Copy is important since we're going to modify this array in place _lowerCamelCase : Optional[Any] = input_ids.numpy().copy() if target_ids is not None: _lowerCamelCase : List[str] = target_ids[p].tolist() _lowerCamelCase : Optional[int] = p # Filter padding out: _lowerCamelCase : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _lowerCamelCase : str = self.tokenizer.decode(__A,skip_special_tokens=__A ) _lowerCamelCase : Any = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__A ) result.append(__A ) if single_mask: return result[0] return result def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Optional[Any]=None ): if isinstance(__A,__A ): _lowerCamelCase : Tuple = [targets] try: _lowerCamelCase : Dict = self.tokenizer.get_vocab() except Exception: _lowerCamelCase : Optional[int] = {} _lowerCamelCase : List[str] = [] for target in targets: _lowerCamelCase : Dict = vocab.get(__A,__A ) if id_ is None: _lowerCamelCase : int = self.tokenizer( __A,add_special_tokens=__A,return_attention_mask=__A,return_token_type_ids=__A,max_length=1,truncation=__A,)["input_ids"] if len(__A ) == 0: logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' "We cannot replace it with anything meaningful, ignoring it" ) continue _lowerCamelCase : List[str] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' ) target_ids.append(id_ ) _lowerCamelCase : Any = list(set(__A ) ) if len(__A ) == 0: raise ValueError("At least one target must be provided when passed." ) _lowerCamelCase : List[Any] = np.array(__A ) return target_ids def lowerCamelCase_ ( self : str,__A : Any=None,__A : int=None ): _lowerCamelCase : List[str] = {} if targets is not None: _lowerCamelCase : Optional[int] = self.get_target_ids(__A,__A ) _lowerCamelCase : Union[str, Any] = target_ids if top_k is not None: _lowerCamelCase : Optional[int] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask",self.model.base_model_prefix,"The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self : int,__A : List[str],*__A : Dict,**__A : List[str] ): _lowerCamelCase : List[str] = super().__call__(__A,**__A ) if isinstance(__A,__A ) and len(__A ) == 1: return outputs[0] return outputs
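A minimal sketch of exercising this fill-mask pipeline end to end; the checkpoint is only a common example, not taken from the snippet:

from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))

# `targets` restricts scoring to explicit candidates, as handled in get_target_ids above:
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"]))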
11
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCAmelCase__ : lowerCAmelCase_ = field( default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowerCAmelCase_ = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCAmelCase_ = field( default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowerCAmelCase_ = field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowerCAmelCase_ = field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ) } , ) lowerCAmelCase_ = field( default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowerCAmelCase_ = field( default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowerCAmelCase_ = field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCAmelCase_ = field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCAmelCase_ = field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class UpperCAmelCase__ ( A ): lowerCAmelCase_ = 'train' lowerCAmelCase_ = 'dev' class UpperCAmelCase__ ( A ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",): _lowerCamelCase : Tuple = args _lowerCamelCase : List[str] = is_language_sensitive _lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__A,__A ): try: _lowerCamelCase : Union[str, Any] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) _lowerCamelCase : str = mode # Load data features from cache or dataset file _lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1" _lowerCamelCase : Optional[Any] = os.path.join( cache_dir if cache_dir is 
not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _lowerCamelCase : Tuple = cached_features_file + ".lock" with FileLock(__A ): if os.path.exists(__A ) and not args.overwrite_cache: _lowerCamelCase : int = time.time() _lowerCamelCase : int = torch.load(__A ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. _lowerCamelCase : Union[str, Any] = self.old_features["features"] _lowerCamelCase : List[Any] = self.old_features.get("dataset",__A ) _lowerCamelCase : List[Any] = self.old_features.get("examples",__A ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' " future run" ) else: if mode == Split.dev: _lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir ) else: _lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir ) _lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features( examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,) _lowerCamelCase : List[Any] = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Optional[Any] ): return len(self.features ) def __getitem__( self : Tuple,__A : str ): # Convert to Tensors and build dataset _lowerCamelCase : List[str] = self.features[i] _lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long ) _lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long ) _lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long ) _lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long ) _lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float ) _lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float ) _lowerCamelCase : Optional[Any] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: _lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long ) _lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
11
1
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a_ : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a_ : Optional[Any] = '''main''' # Default branch name a_ : int = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) a_ : Any = '''aaaaaaa''' # This commit does not exist, so we should 404. a_ : Optional[Any] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes a_ : Optional[Any] = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def UpperCAmelCase ( ) -> Any: print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def UpperCAmelCase ( ) -> List[Any]: print('Bonjour!' ) yield print('Au revoir!' ) class __lowercase( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ): # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers' ) is not None class __lowercase( unittest.TestCase ): '''simple docstring''' @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def snake_case_ ( self , __a ): with ContextManagers([] ): print('Transformers are awesome!' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def snake_case_ ( self , __a ): with ContextManagers([context_en()] ): print('Transformers are awesome!' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def snake_case_ ( self , __a ): with ContextManagers([context_fr(), context_en()] ): print('Transformers are awesome!' 
        )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
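`find_labels` simply inspects a model class's forward signature for label-like arguments; a sketch using classes already imported in this test file:

from transformers import BertForQuestionAnswering
from transformers.utils import find_labels

print(find_labels(BertForQuestionAnswering))  # ['start_positions', 'end_positions']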
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = {'''vocab_file''': '''spiece.model'''} _lowerCamelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', } } # TODO(PVP) - this should be removed in Transformers v5 _lowerCamelCase = { '''t5-small''': 5_12, '''t5-base''': 5_12, '''t5-large''': 5_12, '''t5-3b''': 5_12, '''t5-11b''': 5_12, } _lowerCamelCase = '''▁''' class snake_case ( __UpperCAmelCase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ['''input_ids''', '''attention_mask'''] def __init__( self :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Union[str, Any]="</s>" , _lowerCamelCase :List[Any]="<unk>" , _lowerCamelCase :Union[str, Any]="<pad>" , _lowerCamelCase :int=1_0_0 , _lowerCamelCase :Union[str, Any]=None , _lowerCamelCase :Optional[Dict[str, Any]] = None , _lowerCamelCase :int=True , **_lowerCamelCase :List[Any] , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(_lowerCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __SCREAMING_SNAKE_CASE : Optional[int] = len(set(filter(lambda _lowerCamelCase : bool('''extra_id''' in str(_lowerCamelCase ) ) , _lowerCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = legacy __SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , extra_ids=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowerCamelCase , **_lowerCamelCase , ) __SCREAMING_SNAKE_CASE : Tuple = vocab_file __SCREAMING_SNAKE_CASE : List[str] = extra_ids __SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCamelCase ) @staticmethod def SCREAMING_SNAKE_CASE_ ( _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :int ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: __SCREAMING_SNAKE_CASE : Any = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _lowerCamelCase , ) return max_model_length @property def SCREAMING_SNAKE_CASE_ ( self :Tuple ): return self.sp_model.get_piece_size() + self._extra_ids def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ): __SCREAMING_SNAKE_CASE : str = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None , _lowerCamelCase :bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_lowerCamelCase )) + [1] return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE_ ( self :List[str] ): return list( set(filter(lambda _lowerCamelCase : bool(re.search(r'''<extra_id_\d+>''' , _lowerCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def SCREAMING_SNAKE_CASE_ ( self :List[Any] ): return [self._convert_token_to_id(_lowerCamelCase ) for token in self.get_sentinel_tokens()] def SCREAMING_SNAKE_CASE_ ( self :Any , _lowerCamelCase :List[int] ): if len(_lowerCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ): __SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def SCREAMING_SNAKE_CASE_ ( self :List[Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ): __SCREAMING_SNAKE_CASE : Optional[Any] = self._add_eos_if_not_present(_lowerCamelCase ) if token_ids_a is None: return token_ids_a else: __SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_eos_if_not_present(_lowerCamelCase ) return token_ids_a + token_ids_a def __getstate__( self :Union[str, Any] ): __SCREAMING_SNAKE_CASE : Any = self.__dict__.copy() __SCREAMING_SNAKE_CASE : List[str] = None return state def __setstate__( self :Optional[Any] , _lowerCamelCase :List[str] ): __SCREAMING_SNAKE_CASE : Tuple = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE : Optional[int] = {} __SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :"TextInput" , **_lowerCamelCase :str ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: __SCREAMING_SNAKE_CASE : Dict = SPIECE_UNDERLINE + text.replace(_lowerCamelCase , ''' ''' ) return super().tokenize(_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :List[Any] , **_lowerCamelCase :Dict ): if not self.legacy: __SCREAMING_SNAKE_CASE : str = text.startswith(_lowerCamelCase ) if is_first: __SCREAMING_SNAKE_CASE : str = text[1:] __SCREAMING_SNAKE_CASE : Tuple = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :Optional[Any] ): if token.startswith('''<extra_id_''' ): __SCREAMING_SNAKE_CASE : Tuple = re.match(r'''<extra_id_(\d+)>''' , _lowerCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :str , _lowerCamelCase :Optional[int] ): if index < self.sp_model.get_piece_size(): __SCREAMING_SNAKE_CASE : List[Any] = self.sp_model.IdToPiece(_lowerCamelCase ) else: __SCREAMING_SNAKE_CASE : Dict = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def SCREAMING_SNAKE_CASE_ ( self :Tuple , _lowerCamelCase :Any ): __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Dict = '''''' __SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCamelCase ) + token __SCREAMING_SNAKE_CASE : List[str] = True __SCREAMING_SNAKE_CASE : str = [] else: current_sub_tokens.append(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : int = False out_string += 
self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : List[str] = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
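# A minimal usage sketch for the tokenizer above. It assumes network access to
# download the `t5-small` vocabulary; the example sentence is illustrative.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")

# Encoding appends the EOS token (see _add_eos_if_not_present above).
ids = tokenizer("Translate English to German: Hello", return_tensors="pt").input_ids

# The 100 extra_ids become sentinel tokens <extra_id_0> ... <extra_id_99>,
# mapped to the top of the vocabulary (see the <extra_id_(\d+)> branch above).
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # vocab_size - 1, i.e. 32099
print(tokenizer.decode(ids[0]))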
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
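# A short usage example for the Polynomial class above; the specific
# polynomials are arbitrary. Coefficients are given from degree 0 upward.
p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
q = Polynomial(1, [0, 2])      # 2x

print(p + q)            # 3x^2 + 2x + 1
print(p * q)            # 6x^3 + 2x
print(p.evaluate(2))    # 3*4 + 1 = 13
print(p.derivative())   # 6x
print(q.integral())     # 1.0x^2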
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig A = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } A = logging.get_logger(__name__) class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCAmelCase_ = 'maskformer' lowerCAmelCase_ = {'hidden_size': 'mask_feature_size'} lowerCAmelCase_ = ['resnet', 'swin'] lowerCAmelCase_ = ['detr'] def __init__( self : int , snake_case__ : int = 2_5_6 , snake_case__ : int = 2_5_6 , snake_case__ : float = 0.1 , snake_case__ : bool = False , snake_case__ : Optional[Dict] = None , snake_case__ : Optional[Dict] = None , snake_case__ : float = 0.02 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 20.0 , snake_case__ : Optional[bool] = None , **snake_case__ : Dict , ) -> int: if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _lowerCamelCase = SwinConfig( image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(snake_case__ , snake_case__ ): _lowerCamelCase = backbone_config.pop('model_type' ) _lowerCamelCase = CONFIG_MAPPING[backbone_model_type] _lowerCamelCase = config_class.from_dict(snake_case__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _lowerCamelCase = DetrConfig() else: # verify that the decoder is supported _lowerCamelCase = ( decoder_config.pop('model_type' ) if isinstance(snake_case__ , snake_case__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"""Transformer Decoder {decoder_type} not supported, please use one of""" f""" {",".join(self.decoders_supported )}""" ) if isinstance(snake_case__ , snake_case__ ): _lowerCamelCase = CONFIG_MAPPING[decoder_type] _lowerCamelCase = config_class.from_dict(snake_case__ ) _lowerCamelCase = backbone_config _lowerCamelCase = decoder_config # main feature dimension for the model _lowerCamelCase = fpn_feature_size _lowerCamelCase = mask_feature_size # initializer _lowerCamelCase = init_std _lowerCamelCase = init_xavier_std # Hungarian matcher && loss _lowerCamelCase = cross_entropy_weight _lowerCamelCase = dice_weight _lowerCamelCase = mask_weight _lowerCamelCase = use_auxiliary_loss _lowerCamelCase = no_object_weight _lowerCamelCase = output_auxiliary_logits _lowerCamelCase = self.decoder_config.encoder_attention_heads _lowerCamelCase = self.decoder_config.num_hidden_layers super().__init__(**snake_case__ ) @classmethod def _snake_case ( cls : Optional[int] , snake_case__ : PretrainedConfig , snake_case__ : PretrainedConfig , **snake_case__ : Tuple ) -> List[str]: return cls( backbone_config=snake_case__ , decoder_config=snake_case__ , **snake_case__ , ) def _snake_case ( self : Optional[Any] ) -> Dict[str, any]: _lowerCamelCase = copy.deepcopy(self.__dict__ ) _lowerCamelCase = self.backbone_config.to_dict() _lowerCamelCase = self.decoder_config.to_dict() _lowerCamelCase = self.__class__.model_type return output
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device _UpperCAmelCase : List[Any] = False class lowercase_ ( unittest.TestCase ): """simple docstring""" pass @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : List[str] ) -> Dict: _A = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _A = torch.manual_seed(0 ) _A = pipe( image=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring''' import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] ): return EnvironmentCommand() class lowercase_ ( _UpperCamelCase ): """simple docstring""" @staticmethod def __UpperCAmelCase ( UpperCamelCase__ : ArgumentParser ) -> List[str]: _A = parser.add_parser('env' ) download_parser.set_defaults(func=UpperCamelCase__ ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: _A = huggingface_hub.__version__ _A = 'not installed' _A = 'NA' if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = 'not installed' if is_transformers_available(): import transformers _A = transformers.__version__ _A = 'not installed' if is_accelerate_available(): import accelerate _A = accelerate.__version__ _A = 'not installed' if is_xformers_available(): import xformers _A = xformers.__version__ _A = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})', 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(UpperCamelCase__ ) ) return info @staticmethod def __UpperCAmelCase ( UpperCamelCase__ : List[Any] ) -> Dict: return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : str = 'ResNetConfig' # Base docstring __SCREAMING_SNAKE_CASE : Any = 'microsoft/resnet-50' __SCREAMING_SNAKE_CASE : Tuple = [1, 2_048, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Optional[Any] = 'microsoft/resnet-50' __SCREAMING_SNAKE_CASE : Optional[int] = 'tiger cat' __SCREAMING_SNAKE_CASE : Tuple = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __A (nn.Module): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu" ) ->Tuple: """simple docstring""" super().__init__() snake_case_ = nn.Convad( UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=kernel_size // 2 , bias=UpperCAmelCase_ ) snake_case_ = nn.BatchNormad(UpperCAmelCase_ ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self : int , UpperCAmelCase_ : Tensor ) ->Tensor: """simple docstring""" snake_case_ = self.convolution(UpperCAmelCase_ ) snake_case_ = self.normalization(UpperCAmelCase_ ) snake_case_ = self.activation(UpperCAmelCase_ ) return hidden_state class __A (nn.Module): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : ResNetConfig ) ->Tuple: """simple docstring""" super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Tensor ) ->Tensor: """simple docstring""" snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) snake_case_ = self.embedder(UpperCAmelCase_ ) snake_case_ = self.pooler(UpperCAmelCase_ ) return embedding class __A (nn.Module): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 ) ->List[Any]: """simple docstring""" super().__init__() snake_case_ = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , stride=UpperCAmelCase_ , bias=UpperCAmelCase_ ) snake_case_ = nn.BatchNormad(UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tensor ) ->Tensor: """simple docstring""" snake_case_ = self.convolution(UpperCAmelCase_ ) snake_case_ = self.normalization(UpperCAmelCase_ ) return hidden_state class __A (nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : 
int , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu" ) ->Tuple: """simple docstring""" super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , activation=UpperCAmelCase_ ) , ) snake_case_ = ACTaFN[activation] def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = hidden_state snake_case_ = self.layer(UpperCAmelCase_ ) snake_case_ = self.shortcut(UpperCAmelCase_ ) hidden_state += residual snake_case_ = self.activation(UpperCAmelCase_ ) return hidden_state class __A (nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu" , UpperCAmelCase_ : int = 4 ) ->Tuple: """simple docstring""" super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_ ) , ) snake_case_ = ACTaFN[activation] def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->List[str]: """simple docstring""" snake_case_ = hidden_state snake_case_ = self.layer(UpperCAmelCase_ ) snake_case_ = self.shortcut(UpperCAmelCase_ ) hidden_state += residual snake_case_ = self.activation(UpperCAmelCase_ ) return hidden_state class __A (nn.Module): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : ResNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , ) ->List[Any]: """simple docstring""" super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , activation=config.hidden_act ) , *[layer(UpperCAmelCase_ , UpperCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tensor ) ->Tensor: """simple docstring""" snake_case_ = input for layer in self.layers: snake_case_ = layer(UpperCAmelCase_ ) return hidden_state class __A (nn.Module): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase_ : ResNetConfig ) ->List[Any]: """simple docstring""" super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( UpperCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(UpperCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ , depth=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True ) ->BaseModelOutputWithNoAttention: """simple docstring""" snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(UpperCAmelCase_ ) if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , ) class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = ResNetConfig __lowercase: List[Any] = """resnet""" __lowercase: Optional[Any] = """pixel_values""" __lowercase: List[Any] = True def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str ) ->Tuple: """simple docstring""" if isinstance(UpperCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(UpperCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=False ) ->Tuple: """simple docstring""" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = value __SCREAMING_SNAKE_CASE : int = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __SCREAMING_SNAKE_CASE : List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare ResNet model outputting raw features without any specific head on top.""" , snake_case__ , ) class __A (snake_case__): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Tuple ) ->Union[str, Any]: """simple docstring""" super().__init__(UpperCAmelCase_ ) snake_case_ = config snake_case_ = ResNetEmbeddings(UpperCAmelCase_ ) snake_case_ = ResNetEncoder(UpperCAmelCase_ ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None ) ->BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(UpperCAmelCase_ ) snake_case_ = self.encoder( UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(UpperCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , snake_case__ , ) class __A (snake_case__): '''simple docstring''' def __init__( self : int , UpperCAmelCase_ : int ) ->Any: """simple docstring""" super().__init__(UpperCAmelCase_ ) snake_case_ = config.num_labels snake_case_ = ResNetModel(UpperCAmelCase_ ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ) ->ImageClassifierOutputWithNoAttention: """simple docstring""" snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(UpperCAmelCase_ ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = """single_label_classification""" else: snake_case_ = """multi_label_classification""" if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( """ ResNet backbone, to be used with frameworks like DETR and MaskFormer. 
""" , snake_case__ , ) class __A (snake_case__ , snake_case__): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Any ) ->Union[str, Any]: """simple docstring""" super().__init__(UpperCAmelCase_ ) super()._init_backbone(UpperCAmelCase_ ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(UpperCAmelCase_ ) snake_case_ = ResNetEncoder(UpperCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) @replace_return_docstrings(output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None ) ->BackboneOutput: """simple docstring""" snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(UpperCAmelCase_ ) snake_case_ = self.encoder(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=UpperCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=UpperCAmelCase_ , )
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __A : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=6 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=1_000 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = scope snake_case_ = range_bbox def lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ = bbox[i, j, 3] snake_case_ = bbox[i, j, 1] snake_case_ = t if bbox[i, j, 2] < bbox[i, j, 0]: snake_case_ = bbox[i, j, 2] snake_case_ = bbox[i, j, 0] snake_case_ = t snake_case_ = None if self.use_input_mask: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->str: """simple docstring""" snake_case_ = LiltModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , ) ->Dict: """simple docstring""" snake_case_ = self.num_labels snake_case_ = LiltForTokenClassification(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() snake_case_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , ) ->Dict: """simple docstring""" snake_case_ = LiltForQuestionAnswering(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() snake_case_ = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Optional[int] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowercase: Optional[Any] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} 
) __lowercase: Union[str, Any] = False __lowercase: List[str] = False def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" return True def lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" snake_case_ = LiltModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self : str ) ->List[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) @slow def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = LiltModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @require_torch @slow class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCAmelCase_ ) snake_case_ = torch.tensor([[1, 2]] , device=UpperCAmelCase_ ) snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ ) # forward pass with torch.no_grad(): snake_case_ = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ ) snake_case_ = torch.Size([1, 2, 768] ) snake_case_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCAmelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : int = {} class A__ ( A__ ): """simple docstring""" _lowercase = 'llama' _lowercase = ['past_key_values'] def __init__( self : Dict , lowerCamelCase__ : str=32_000 , lowerCamelCase__ : Dict=4_096 , lowerCamelCase__ : Tuple=11_008 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : int=None , lowerCamelCase__ : Union[str, Any]="silu" , lowerCamelCase__ : Any=2_048 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : Dict=1E-6 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Union[str, Any] , ): a__ : List[str] = vocab_size a__ : str = max_position_embeddings a__ : Dict = hidden_size a__ : List[str] = intermediate_size a__ : Dict = num_hidden_layers a__ : Optional[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: a__ : Tuple = num_attention_heads a__ : str = num_key_value_heads a__ : Dict = hidden_act a__ : Optional[int] = initializer_range a__ : str = rms_norm_eps a__ : Optional[Any] = pretraining_tp a__ : int = use_cache a__ : Union[str, Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , tie_word_embeddings=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Any ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCamelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) a__ : Tuple = self.rope_scaling.get("type" , lowerCamelCase__ ) a__ : Tuple = self.rope_scaling.get("factor" , lowerCamelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
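# A small sketch of the rope_scaling validation implemented above. The class
# is shipped by transformers as LlamaConfig (model_type "llama"); the scaling
# values are illustrative.
from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted

try:
    LlamaConfig(rope_scaling={"type": "exotic", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']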
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a : Tuple = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : Dict = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys __a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
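# The import structure above defers the heavy torch-dependent imports until an
# attribute is first accessed. Reduced to plain PEP 562 module-level
# __getattr__, the pattern looks roughly like this (a sketch of the idea, not
# the _LazyModule implementation itself):
import importlib


def __getattr__(name):
    # Imported only when e.g. `package.LukeModel` is first touched.
    if name in {"LukeModel", "LukeForMaskedLM"}:
        mod = importlib.import_module(".modeling_luke", __name__)
        return getattr(mod, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")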
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
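# A tiny self-contained check of gaussian_filter above, using a synthetic
# gradient image instead of the lena.jpg file, so no cv2 file I/O is needed.
import numpy as np

synthetic = np.arange(100, dtype=np.uint8).reshape(10, 10) * 2
smoothed = gaussian_filter(synthetic, k_size=3, sigma=1)
print(smoothed.shape)  # (8, 8): each side shrinks by k_size - 1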
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
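# Typical use of the pipeline above goes through the `pipeline` factory. The
# CLIP checkpoint below is one common choice rather than anything mandated by
# this file, and the image URL is illustrative; both require network access.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
preds = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["cat", "parrot"],
)
print(preds)  # e.g. [{'score': ..., 'label': 'parrot'}, {'score': ..., 'label': 'cat'}]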
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveConstraint(A ) self.assertTrue(isinstance(dc.token_ids, A ) ) with self.assertRaises(A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(A ): DisjunctiveConstraint(A ) # fails here def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [[1, 2, 3], [1, 2, 4]] SCREAMING_SNAKE_CASE : List[str] = DisjunctiveConstraint(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = dc.update(1 ) SCREAMING_SNAKE_CASE : Dict = stepped is True and completed is False and reset is False self.assertTrue(A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = dc.update(2 ) SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dc.update(3 ) SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(A ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
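# The constraint above is normally consumed by constrained beam search during
# generation. A direct walkthrough of the same state machine the tests step
# through, with an arbitrary pair of branches:
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed)    # True: the branch [1, 2, 4] was fully matched
print(dc.current_seq)  # [1, 2, 4]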
28
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class _UpperCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self : Optional[Any] , _lowerCamelCase : float , _lowerCamelCase : Callable , _lowerCamelCase : int , _lowerCamelCase : float = 1.0 , _lowerCamelCase : str = None , ): '''simple docstring''' super().__init__() __lowerCamelCase : Dict = initial_learning_rate __lowerCamelCase : Any = warmup_steps __lowerCamelCase : Optional[int] = power __lowerCamelCase : str = decay_schedule_fn __lowerCamelCase : Union[str, Any] = name def __call__( self : List[str] , _lowerCamelCase : int ): '''simple docstring''' with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowerCamelCase : Dict = tf.cast(_lowerCamelCase , tf.floataa ) __lowerCamelCase : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa ) __lowerCamelCase : List[str] = global_step_float / warmup_steps_float __lowerCamelCase : List[str] = self.initial_learning_rate * tf.math.pow(_lowerCamelCase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_lowerCamelCase , ) def _snake_case ( self : Any ): '''simple docstring''' return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def _UpperCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.9_9_9 , UpperCAmelCase : float = 1e-8 , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : Optional[List[str]] = None , ): """simple docstring""" __lowerCamelCase : str = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase , ) if num_warmup_steps: __lowerCamelCase : str = WarmUp( initial_learning_rate=UpperCAmelCase , decay_schedule_fn=UpperCAmelCase , warmup_steps=UpperCAmelCase , ) if weight_decay_rate > 0.0: __lowerCamelCase : List[Any] = AdamWeightDecay( learning_rate=UpperCAmelCase , weight_decay_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=UpperCAmelCase , ) else: __lowerCamelCase : Optional[int] = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class _UpperCamelCase ( A ): '''simple docstring''' def __init__( self : Tuple , _lowerCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , _lowerCamelCase : float = 0.9 , _lowerCamelCase : float = 0.999 , _lowerCamelCase : float = 1E-7 , _lowerCamelCase : bool = False , _lowerCamelCase : float = 0.0 , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "AdamWeightDecay" , **_lowerCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = weight_decay_rate __lowerCamelCase : Tuple = include_in_weight_decay __lowerCamelCase : Optional[Any] = exclude_from_weight_decay @classmethod def _snake_case ( cls : Union[str, Any] , _lowerCamelCase : List[Any] ): '''simple docstring''' __lowerCamelCase : Optional[Any] = {"""WarmUp""": WarmUp} return super(_lowerCamelCase , cls ).from_config(_lowerCamelCase , custom_objects=_lowerCamelCase ) def _snake_case ( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ): '''simple docstring''' super(_lowerCamelCase , self )._prepare_local(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : Optional[Any] = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def _snake_case ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ): '''simple docstring''' __lowerCamelCase : Optional[Any] = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def _snake_case ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : str=None , **_lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCamelCase , __lowerCamelCase : Union[str, Any] = list(zip(*_lowerCamelCase ) ) return super(_lowerCamelCase , self ).apply_gradients(zip(_lowerCamelCase , _lowerCamelCase ) , name=_lowerCamelCase , **_lowerCamelCase ) def _snake_case ( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ): '''simple docstring''' if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowerCamelCase : Union[str, Any] = apply_state or {} __lowerCamelCase : Union[str, Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowerCamelCase : List[str] = self._fallback_apply_state(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : Dict = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def _snake_case ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict=None ): '''simple docstring''' __lowerCamelCase , __lowerCamelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , _lowerCamelCase ) __lowerCamelCase : Dict = self._decay_weights_op(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with tf.control_dependencies([decay] ): return super(_lowerCamelCase , self )._resource_apply_dense(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=None ): '''simple docstring''' 
__lowerCamelCase , __lowerCamelCase : int = self._get_lr(var.device , var.dtype.base_dtype , _lowerCamelCase ) __lowerCamelCase : str = self._decay_weights_op(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with tf.control_dependencies([decay] ): return super(_lowerCamelCase , self )._resource_apply_sparse(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : int = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def _snake_case ( self : Dict , _lowerCamelCase : List[str] ): '''simple docstring''' if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(_lowerCamelCase , _lowerCamelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(_lowerCamelCase , _lowerCamelCase ) is not None: return False return True class _UpperCamelCase ( A ): '''simple docstring''' def __init__( self : Union[str, Any] ): '''simple docstring''' __lowerCamelCase : Optional[int] = [] __lowerCamelCase : int = None @property def _snake_case ( self : Tuple ): '''simple docstring''' if self._accum_steps is None: __lowerCamelCase : Any = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=_lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def _snake_case ( self : Any ): '''simple docstring''' if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : List[str] , _lowerCamelCase : List[str] ): '''simple docstring''' if not self._gradients: __lowerCamelCase : List[str] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(_lowerCamelCase ) , trainable=_lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(_lowerCamelCase ) != len(self._gradients ): raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(_lowerCamelCase )}""" ) for accum_gradient, gradient in zip(self._gradients , _lowerCamelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(_lowerCamelCase ) self._accum_steps.assign_add(1 ) def _snake_case ( self : Any ): '''simple docstring''' if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(_lowerCamelCase ) )
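The last class above is a gradient accumulator: calling it adds the incoming gradients into per-variable buffers and increments a step counter, the gradients property reads the buffers back, and the reset method zeroes them so several micro-batches can be folded into one optimizer update. A minimal usage sketch, assuming the de-obfuscated names GradientAccumulator, .gradients and .reset(), with model, dataset and compute_loss as placeholders:

import tensorflow as tf

accumulator = GradientAccumulator()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

for step, (x, y) in enumerate(dataset):
    with tf.GradientTape() as tape:
        loss = compute_loss(model(x), y)
    grads = tape.gradient(loss, model.trainable_variables)
    accumulator(grads)  # accumulate instead of applying immediately
    if (step + 1) % 8 == 0:  # apply once every 8 micro-batches
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()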
519
0
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 1_0_0_0_0_0_0 ): """simple docstring""" __UpperCAmelCase = 0 __UpperCAmelCase = 1 for current_denominator in range(1 , limit + 1 ): __UpperCAmelCase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: __UpperCAmelCase = current_numerator __UpperCAmelCase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
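This is the classic ordered-fractions search: for each denominator d up to the limit, the best numerator strictly below the target fraction is numerator * d // denominator (decremented by one when d is an exact multiple of the denominator), and candidates are compared by cross-multiplication to avoid floating point. A couple of spot checks, assuming the de-obfuscated name solution used in the __main__ block:

# For 3/7 with denominators up to 8, the largest smaller fraction is 2/5.
assert solution(numerator=3, denominator=7, limit=8) == 2
# With the default limit of 1_000_000 the winning fraction is 428570/999997.
assert solution() == 428570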
654
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : Any = tuple[int, int] class A : def __init__( self : Optional[int] ) -> int: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : Optional[Any] ) -> List[Any]: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Dict ) -> Optional[int]: return len(self.elements ) == 0 def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__a ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : int , __a : Any ) -> int: if item in self.set: self.set.remove(__a ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : List[str] ) -> Dict: return self.elements[0][1] def snake_case__ ( self : Any ) -> List[str]: ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__a ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # euclidean distance __UpperCAmelCase = np.array(UpperCamelCase__ ) __UpperCAmelCase = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): __UpperCAmelCase = '''*''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: 
print(UpperCamelCase__ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ): """simple docstring""" for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Dict = make_common_ground() __lowerCAmelCase : int = blocks_blk # hyper parameters __lowerCAmelCase : Dict = 1 __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Union[str, Any] = 20 __lowerCAmelCase : Any = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[Any] = (0, 0) __lowerCAmelCase : Any = (n - 1, n - 1) __lowerCAmelCase : Optional[int] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
654
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu a = [ '''EAGER''', '''AOT_EAGER''', '''INDUCTOR''', '''NVFUSER''', '''AOT_NVFUSER''', '''AOT_CUDAGRAPHS''', '''OFI''', '''FX2TRT''', '''ONNXRT''', '''IPEX''', ] def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : Any=None , _snake_case : List[str]=None ) -> Optional[int]: '''simple docstring''' _A = True while ask_again: _A = input(_snake_case ) try: if default is not None and len(_snake_case ) == 0: return default return convert_value(_snake_case ) if convert_value is not None else result except Exception: if error_message is not None: print(_snake_case ) def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any]=[] , _snake_case : str=None , _snake_case : Tuple=0 ) -> Union[str, Any]: '''simple docstring''' _A = BulletMenu(_snake_case , _snake_case ) _A = menu.run(default_choice=_snake_case ) return convert_value(_snake_case ) if convert_value is not None else result def _snake_case ( _snake_case : str ) -> Optional[Any]: '''simple docstring''' _A = int(_snake_case ) return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] ) def _snake_case ( _snake_case : Any ) -> Tuple: '''simple docstring''' _A = int(_snake_case ) return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] ) def _snake_case ( _snake_case : List[Any] ) -> Optional[int]: '''simple docstring''' _A = int(_snake_case ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _snake_case ( _snake_case : Tuple ) -> Union[str, Any]: '''simple docstring''' _A = int(_snake_case ) return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] ) def _snake_case ( _snake_case : Optional[int] ) -> List[str]: '''simple docstring''' _A = int(_snake_case ) return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] ) def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class lowercase_ ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ): _A = super()._format_usage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = usage.replace('<command> [<args>] ' , '' ) return usage
7
def A ( _lowerCamelCase ): '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Union[str, Any] = F"Input value of [number={number}] must be an integer" raise TypeError(_lowerCamelCase ) if number < 1: _lowerCAmelCase : Tuple = F"Input value of [number={number}] must be > 0" raise ValueError(_lowerCamelCase ) _lowerCAmelCase : Dict = 1 for i in range(1 , _lowerCamelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
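The loop above applies the Catalan recurrence C(i) = C(i-1) * (4i - 2) / (i + 1), so the function returns the (n-1)-th Catalan number: 1, 1, 2, 5, 14 for n = 1..5. A quick sanity check, assuming the de-obfuscated name catalan for the function:

for n, expected in [(1, 1), (2, 1), (3, 2), (4, 5), (5, 14)]:
    assert catalan(n) == expected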
500
0
def __lowerCAmelCase ( __magic_name__ = 1_0_0_0_0_0_0 ): '''simple docstring''' _lowercase: Optional[int] = set(range(3 , limit , 2 ) ) primes.add(2 ) for p in range(3 , limit , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , limit , p ) ) ) _lowercase: Tuple = [float(n ) for n in range(limit + 1 )] for p in primes: for n in range(p , limit + 1 , p ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'''{solution() = }''')
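The sieve above computes Euler's totient for every n at once, phi[n] = n * prod(1 - 1/p) over the primes p dividing n, and the sum of phi(n) for 2 <= n <= limit counts the reduced proper fractions with denominator up to the limit. A small hand check, assuming the de-obfuscated name solution used in the __main__ block:

# phi: 2->1, 3->2, 4->2, 5->4, 6->2, 7->6, 8->4, so the total is 21.
assert solution(8) == 21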
717
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ): _lowercase: List[Any] = [0 for i in range(r + 1 )] # nc0 = 1 _lowercase: Dict = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _lowercase: str = min(__magic_name__ , __magic_name__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
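The helper keeps only a single row of Pascal's triangle: sweeping j from r down to 1 means c[j - 1] still holds the previous row's value when it is added into c[j], giving C(n, r) in O(r) memory. Spot checks, assuming the de-obfuscated name binomial_coefficient used in the print above:

assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=6, r=2) == 15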
206
0
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( UpperCamelCase__ ): lowerCamelCase__ = (DDIMParallelScheduler,) lowerCamelCase__ = (('eta', 0.0), ('num_inference_steps', 50)) def __a ( self , **snake_case_ ) -> Any: SCREAMING_SNAKE_CASE : Union[str, Any] ={ '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**snake_case_ ) return config def __a ( self , **snake_case_ ) -> Union[str, Any]: SCREAMING_SNAKE_CASE : Optional[Any] =self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Tuple =self.get_scheduler_config(**snake_case_ ) SCREAMING_SNAKE_CASE : Union[str, Any] =scheduler_class(**snake_case_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] =10, 0.0 SCREAMING_SNAKE_CASE : int =self.dummy_model() SCREAMING_SNAKE_CASE : Tuple =self.dummy_sample_deter scheduler.set_timesteps(snake_case_ ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[str] =model(snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE : str =scheduler.step(snake_case_ , snake_case_ , snake_case_ , snake_case_ ).prev_sample return sample def __a ( self ) -> Any: for timesteps in [100, 500, 1_000]: self.check_over_configs(num_train_timesteps=snake_case_ ) def __a ( self ) -> str: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=snake_case_ ) SCREAMING_SNAKE_CASE : Union[str, Any] =self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] =self.get_scheduler_config(steps_offset=1 ) SCREAMING_SNAKE_CASE : Any =scheduler_class(**snake_case_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __a ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ ) def __a ( self ) -> List[str]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case_ ) def __a ( self ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case_ ) def __a ( self ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case_ ) def __a ( self ) -> Tuple: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=snake_case_ ) def __a ( self ) -> int: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=snake_case_ ) def __a ( self ) -> Union[str, Any]: self.check_over_configs(thresholding=snake_case_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , ) def __a ( self ) -> str: for t in [1, 10, 49]: self.check_over_forward(time_step=snake_case_ ) def __a ( self ) -> Any: for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=snake_case_ , num_inference_steps=snake_case_ ) def __a ( self ) -> Optional[int]: for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=snake_case_ , eta=snake_case_ ) def __a ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE : Any =self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] =self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] =scheduler_class(**snake_case_ ) assert 
torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def __a ( self ) -> List[str]: SCREAMING_SNAKE_CASE : Union[str, Any] =self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] =self.get_scheduler_config() SCREAMING_SNAKE_CASE : int =scheduler_class(**snake_case_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any =10, 0.0 scheduler.set_timesteps(snake_case_ ) SCREAMING_SNAKE_CASE : Dict =self.dummy_model() SCREAMING_SNAKE_CASE : Union[str, Any] =self.dummy_sample_deter SCREAMING_SNAKE_CASE : Dict =self.dummy_sample_deter + 0.1 SCREAMING_SNAKE_CASE : Dict =self.dummy_sample_deter - 0.1 SCREAMING_SNAKE_CASE : Tuple =samplea.shape[0] SCREAMING_SNAKE_CASE : Any =torch.stack([samplea, samplea, samplea] , dim=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] =torch.arange(snake_case_ )[0:3, None].repeat(1 , snake_case_ ) SCREAMING_SNAKE_CASE : Dict =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) SCREAMING_SNAKE_CASE : Optional[Any] =scheduler.batch_step_no_noise(snake_case_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case_ ) SCREAMING_SNAKE_CASE : Optional[Any] =torch.sum(torch.abs(snake_case_ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] =torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def __a ( self ) -> str: SCREAMING_SNAKE_CASE : Dict =self.full_loop() SCREAMING_SNAKE_CASE : Any =torch.sum(torch.abs(snake_case_ ) ) SCREAMING_SNAKE_CASE : Dict =torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.22_3967 ) < 1E-3 def __a ( self ) -> Any: SCREAMING_SNAKE_CASE : Dict =self.full_loop(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : List[Any] =torch.sum(torch.abs(snake_case_ ) ) SCREAMING_SNAKE_CASE : List[Any] =torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def __a ( self ) -> Tuple: # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE : List[str] =self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.01 ) SCREAMING_SNAKE_CASE : List[Any] =torch.sum(torch.abs(snake_case_ ) ) SCREAMING_SNAKE_CASE : int =torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def __a ( self ) -> Optional[int]: # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE : Any =self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.01 ) SCREAMING_SNAKE_CASE : List[str] =torch.sum(torch.abs(snake_case_ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] =torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
258
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline _A = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a=False , ) -> Optional[Any]: """simple docstring""" output_path.parent.mkdir(parents=__a , exist_ok=__a ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __a , __a , f=output_path.as_posix() , input_names=__a , output_names=__a , dynamic_axes=__a , do_constant_folding=__a , use_external_data_format=__a , enable_onnx_checker=__a , opset_version=__a , ) else: export( __a , __a , f=output_path.as_posix() , input_names=__a , output_names=__a , dynamic_axes=__a , do_constant_folding=__a , opset_version=__a , ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): SCREAMING_SNAKE_CASE : Dict ='''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: SCREAMING_SNAKE_CASE : int ='''cpu''' SCREAMING_SNAKE_CASE : Union[str, Any] =StableDiffusionPipeline.from_pretrained(__a , torch_dtype=__a ).to(__a ) SCREAMING_SNAKE_CASE : Optional[Any] =Path(__a ) # TEXT ENCODER SCREAMING_SNAKE_CASE : List[Any] =pipeline.text_encoder.config.max_position_embeddings SCREAMING_SNAKE_CASE : Tuple =pipeline.text_encoder.config.hidden_size SCREAMING_SNAKE_CASE : Optional[int] =pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__a , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=__a , ) del pipeline.text_encoder # UNET SCREAMING_SNAKE_CASE : Optional[int] =pipeline.unet.config.in_channels SCREAMING_SNAKE_CASE : List[Any] =pipeline.unet.config.sample_size SCREAMING_SNAKE_CASE : str =output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , __a , __a , __a ).to(device=__a , dtype=__a ), torch.randn(2 ).to(device=__a , dtype=__a ), torch.randn(2 , __a , __a ).to(device=__a , dtype=__a ), False, ) , output_path=__a , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=__a , use_external_data_format=__a , ) SCREAMING_SNAKE_CASE : Optional[int] =str(unet_path.absolute().as_posix() ) SCREAMING_SNAKE_CASE : List[str] =os.path.dirname(__a ) SCREAMING_SNAKE_CASE : Optional[int] =onnx.load(__a ) # clean up existing tensor files shutil.rmtree(__a ) os.mkdir(__a ) # collate external tensor files into one 
onnx.save_model( __a , __a , save_as_external_data=__a , all_tensors_to_one_file=__a , location='''weights.pb''' , convert_attribute=__a , ) del pipeline.unet # VAE ENCODER SCREAMING_SNAKE_CASE : str =pipeline.vae SCREAMING_SNAKE_CASE : Any =vae_encoder.config.in_channels SCREAMING_SNAKE_CASE : List[str] =vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder SCREAMING_SNAKE_CASE : Union[str, Any] =lambda __a , __a : vae_encoder.encode(__a , __a )[0].sample() onnx_export( __a , model_args=( torch.randn(1 , __a , __a , __a ).to(device=__a , dtype=__a ), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__a , ) # VAE DECODER SCREAMING_SNAKE_CASE : str =pipeline.vae SCREAMING_SNAKE_CASE : Optional[int] =vae_decoder.config.latent_channels SCREAMING_SNAKE_CASE : int =vae_decoder.config.out_channels # forward only through the decoder part SCREAMING_SNAKE_CASE : Dict =vae_encoder.decode onnx_export( __a , model_args=( torch.randn(1 , __a , __a , __a ).to(device=__a , dtype=__a ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__a , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: SCREAMING_SNAKE_CASE : Any =pipeline.safety_checker SCREAMING_SNAKE_CASE : Dict =safety_checker.config.vision_config.num_channels SCREAMING_SNAKE_CASE : Optional[Any] =safety_checker.config.vision_config.image_size SCREAMING_SNAKE_CASE : List[str] =safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , __a , __a , __a , ).to(device=__a , dtype=__a ), torch.randn(1 , __a , __a , __a ).to(device=__a , dtype=__a ), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=__a , ) del pipeline.safety_checker SCREAMING_SNAKE_CASE : int =OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' ) SCREAMING_SNAKE_CASE : Dict =pipeline.feature_extractor else: SCREAMING_SNAKE_CASE : Optional[Any] =None SCREAMING_SNAKE_CASE : Optional[Any] =None SCREAMING_SNAKE_CASE : Union[str, Any] =OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=__a , feature_extractor=__a , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(__a ) print('''ONNX pipeline saved to''' , __a ) del pipeline del onnx_pipeline SCREAMING_SNAKE_CASE : str =OnnxStableDiffusionPipeline.from_pretrained(__a , provider='''CPUExecutionProvider''' ) print('''ONNX pipeline is loadable''' ) 
if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") _A = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
258
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple=7 , __lowerCamelCase : int=3 , __lowerCamelCase : List[str]=1_0 , __lowerCamelCase : Any=1_8 , __lowerCamelCase : str=3_0 , __lowerCamelCase : Dict=4_0_0 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : Any=[0.5, 0.5, 0.5] , __lowerCamelCase : str=None , ) -> List[Any]: """simple docstring""" UpperCAmelCase = size if size is not None else {"""shortest_edge""": 1_8} UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = num_channels UpperCAmelCase = num_frames UpperCAmelCase = image_size UpperCAmelCase = min_resolution UpperCAmelCase = max_resolution UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_normalize UpperCAmelCase = image_mean UpperCAmelCase = image_std UpperCAmelCase = crop_size def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __lowercase ( __snake_case , unittest.TestCase ): UpperCamelCase = VivitImageProcessor if is_vision_available() else None def _lowercase ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase = VivitImageProcessingTester(self ) @property def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """size""" ) ) def _lowercase ( self : str ) -> Tuple: """simple docstring""" UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def _lowercase ( self : str ) -> str: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for video in video_inputs: 
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input UpperCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for video in video_inputs: self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input UpperCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for video in video_inputs: self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input UpperCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
720
__a = [ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def _UpperCamelCase ( lowerCAmelCase_ ) ->int: UpperCAmelCase = {"""I""": 1, """V""": 5, """X""": 1_0, """L""": 5_0, """C""": 1_0_0, """D""": 5_0_0, """M""": 1_0_0_0} UpperCAmelCase = 0 UpperCAmelCase = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def _UpperCamelCase ( lowerCAmelCase_ ) ->str: UpperCAmelCase = [] for arabic, roman in ROMAN: ((UpperCAmelCase) , (UpperCAmelCase)) = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
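The first converter handles subtractive pairs by looking one symbol ahead; the second greedily peels off the largest value in the ROMAN table. Round-trip spot checks, assuming the de-obfuscated names roman_to_int and int_to_roman:

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int(int_to_roman(3999)) == 3999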
627
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available lowercase : List[str] = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : str = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : int = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
542
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging a : Optional[int] = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = R"\w+[.]\d+" UpperCAmelCase : Dict = re.findall(__magic_name__ , __magic_name__ ) for pat in pats: UpperCAmelCase : Tuple = key.replace(__magic_name__ , "_".join(pat.split("." ) ) ) return key def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: UpperCAmelCase : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": UpperCAmelCase : Union[str, Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=42 ): '''simple docstring''' UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params UpperCAmelCase : Tuple = flax_model.init_weights(PRNGKey(__magic_name__ ) ) UpperCAmelCase : Optional[Any] = flatten_dict(__magic_name__ ) UpperCAmelCase : List[str] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCAmelCase : Tuple = rename_key(__magic_name__ ) UpperCAmelCase : List[str] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown UpperCAmelCase : Optional[int] = jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
679
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : str = logging.get_logger(__name__) snake_case : Union[str, Any] = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class lowerCAmelCase__ ( UpperCAmelCase__ ): __A : str = 'wavlm' def __init__( self : Tuple , _A : Optional[int]=32 , _A : Dict=768 , _A : int=12 , _A : Optional[Any]=12 , _A : List[str]=3072 , _A : Tuple="gelu" , _A : str=0.1 , _A : List[Any]=0.1 , _A : str=0.1 , _A : int=0.0 , _A : Any=0.1 , _A : Optional[Any]=0.1 , _A : Dict=0.02 , _A : Tuple=1e-5 , _A : Optional[int]="group" , _A : List[Any]="gelu" , _A : List[Any]=(512, 512, 512, 512, 512, 512, 512) , _A : List[Any]=(5, 2, 2, 2, 2, 2, 2) , _A : Dict=(10, 3, 3, 3, 3, 2, 2) , _A : Union[str, Any]=False , _A : int=128 , _A : Tuple=16 , _A : str=320 , _A : List[str]=800 , _A : Dict=False , _A : int=True , _A : Union[str, Any]=0.05 , _A : int=10 , _A : Optional[Any]=2 , _A : Union[str, Any]=0.0 , _A : Optional[int]=10 , _A : Dict=320 , _A : Dict=2 , _A : Any=0.1 , _A : Union[str, Any]=100 , _A : str=256 , _A : int=256 , _A : Optional[Any]=0.1 , _A : Tuple="mean" , _A : Dict=False , _A : str=False , _A : Union[str, Any]=256 , _A : Tuple=(512, 512, 512, 512, 1500) , _A : List[str]=(5, 3, 3, 1, 1) , _A : Tuple=(1, 2, 3, 1, 1) , _A : Optional[int]=512 , _A : Any=80 , _A : int=0 , _A : Dict=1 , _A : List[str]=2 , _A : Tuple=False , _A : List[Any]=3 , _A : Optional[int]=2 , _A : Optional[Any]=3 , _A : Optional[Any]=None , **_A : List[str] , ): super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase) A__ : Union[str, Any] = hidden_size A__ : int = feat_extract_norm A__ : Any = feat_extract_activation A__ : Optional[int] = list(__lowerCAmelCase) A__ : Union[str, Any] = list(__lowerCAmelCase) A__ : Tuple = list(__lowerCAmelCase) A__ : Any = conv_bias A__ : Optional[int] = num_buckets A__ : Union[str, Any] = max_bucket_distance A__ : Any = num_conv_pos_embeddings A__ : Any = num_conv_pos_embedding_groups A__ : str = len(self.conv_dim) A__ : Tuple = num_hidden_layers A__ : Union[str, Any] = intermediate_size A__ : Union[str, Any] = hidden_act A__ : Union[str, Any] = num_attention_heads A__ : int = hidden_dropout A__ : Union[str, Any] = attention_dropout A__ : Optional[int] = activation_dropout A__ : Dict = feat_proj_dropout A__ : Dict = final_dropout A__ : Dict = layerdrop A__ : Dict = layer_norm_eps A__ : Union[str, Any] = initializer_range A__ : Optional[Any] = num_ctc_classes A__ : Tuple = vocab_size A__ : Union[str, Any] = do_stable_layer_norm A__ : Optional[int] = use_weighted_layer_sum A__ : List[Any] = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A__ : Tuple = apply_spec_augment A__ : Tuple = mask_time_prob A__ : Any = mask_time_length A__ : Dict = mask_time_min_masks A__ : Optional[Any] = mask_feature_prob A__ : str = mask_feature_length # parameters for pretraining with codevector quantized representations A__ : str = num_codevectors_per_group A__ : Optional[Any] = num_codevector_groups A__ : str = contrastive_logits_temperature A__ : Tuple = num_negatives A__ : List[str] = codevector_dim A__ : Tuple = proj_codevector_dim A__ : Optional[Any] = diversity_loss_weight # ctc loss A__ : List[str] = ctc_loss_reduction A__ : Optional[int] = ctc_zero_infinity # adapter A__ : List[Any] = add_adapter A__ : int = adapter_kernel_size A__ : Optional[Any] = adapter_stride A__ : str = num_adapter_layers A__ : int = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A__ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A__ : Union[str, Any] = list(__lowerCAmelCase) A__ : List[Any] = list(__lowerCAmelCase) A__ : Union[str, Any] = list(__lowerCAmelCase) A__ : Optional[Any] = xvector_output_dim @property def _lowercase ( self : Any): return functools.reduce(operator.mul , self.conv_stride , 1)
710
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class lowerCAmelCase__ ( ctypes.Structure ): # _fields is a specific attr expected by ctypes __A : Optional[Any] = [('size', ctypes.c_int), ('visible', ctypes.c_byte)] def snake_case__ ( ) -> List[Any]: """simple docstring""" if os.name == "nt": A__ : Optional[Any] = CursorInfo() A__ : Tuple = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) ) A__ : Any = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) ) elif os.name == "posix": sys.stdout.write("\033[?25l" ) sys.stdout.flush() def snake_case__ ( ) -> Dict: """simple docstring""" if os.name == "nt": A__ : List[str] = CursorInfo() A__ : Optional[int] = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) ) A__ : int = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowercase , ctypes.byref(__lowercase ) ) elif os.name == "posix": sys.stdout.write("\033[?25h" ) sys.stdout.flush() @contextmanager def snake_case__ ( ) -> Optional[int]: """simple docstring""" try: hide_cursor() yield finally: show_cursor()
182
0
def UpperCAmelCase_ ( number ): '''simple docstring''' if not isinstance(number , int ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) count = 0 while number: # `number &= number - 1` clears the lowest set bit, so the loop runs once per set bit instead of once per bit position number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
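A quick cross-check for the `number &= number - 1` trick is Python's string-based popcount; both should agree for any non-negative integer:

def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drops the lowest set bit each iteration
        count += 1
    return count

for n in (0, 1, 0b1011, 255, 2**31 - 1):
    assert popcount_kernighan(n) == bin(n).count("1")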
493
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _UpperCamelCase ( _A ) -> str: """simple docstring""" _UpperCAmelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _UpperCAmelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _UpperCAmelCase = 4 _UpperCAmelCase = 4_8 _UpperCAmelCase = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _UpperCAmelCase = [6, 6, 6, 6] _UpperCAmelCase = 6_0 _UpperCAmelCase = [6, 6, 6, 6] _UpperCAmelCase = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _UpperCAmelCase = 4 _UpperCAmelCase = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _UpperCAmelCase = 1 _UpperCAmelCase = 1 _UpperCAmelCase = 1_2_6 _UpperCAmelCase = 7 _UpperCAmelCase = 255.0 _UpperCAmelCase = """""" return config def _UpperCamelCase ( _A , _A ) -> Tuple: """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: _UpperCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _UpperCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: _UpperCAmelCase = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: _UpperCAmelCase = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: _UpperCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _UpperCAmelCase = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _UpperCAmelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _UpperCAmelCase = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _UpperCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _UpperCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: _UpperCAmelCase = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: _UpperCAmelCase = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: _UpperCAmelCase = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: _UpperCAmelCase = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: _UpperCAmelCase = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": _UpperCAmelCase = """layernorm.weight""" if name == "norm.bias": _UpperCAmelCase = """layernorm.bias""" if "conv_first" in name: _UpperCAmelCase = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _UpperCAmelCase = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _UpperCAmelCase = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: _UpperCAmelCase = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: _UpperCAmelCase = name.replace("""upsample.2""" , 
"""upsample.convolution_1""" ) _UpperCAmelCase = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": _UpperCAmelCase = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) _UpperCAmelCase = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: _UpperCAmelCase = """swin2sr.""" + name return name def _UpperCamelCase ( _A , _A ) -> int: """simple docstring""" for key in orig_state_dict.copy().keys(): _UpperCAmelCase = orig_state_dict.pop(_A ) if "qkv" in key: _UpperCAmelCase = key.split(""".""" ) _UpperCAmelCase = int(key_split[1] ) _UpperCAmelCase = int(key_split[4] ) _UpperCAmelCase = config.embed_dim if "weight" in key: _UpperCAmelCase = val[:dim, :] _UpperCAmelCase = val[dim : dim * 2, :] _UpperCAmelCase = val[-dim:, :] else: _UpperCAmelCase = val[:dim] _UpperCAmelCase = val[dim : dim * 2] _UpperCAmelCase = val[-dim:] pass else: _UpperCAmelCase = val return orig_state_dict def _UpperCamelCase ( _A , _A , _A ) -> List[Any]: """simple docstring""" _UpperCAmelCase = get_config(_A ) _UpperCAmelCase = SwinaSRForImageSuperResolution(_A ) model.eval() _UpperCAmelCase = torch.hub.load_state_dict_from_url(_A , map_location="""cpu""" ) _UpperCAmelCase = convert_state_dict(_A , _A ) _UpperCAmelCase ,_UpperCAmelCase = model.load_state_dict(_A , strict=_A ) if len(_A ) > 0: raise ValueError("""Missing keys when converting: {}""".format(_A ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values _UpperCAmelCase = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" _UpperCAmelCase = Image.open(requests.get(_A , stream=_A ).raw ).convert("""RGB""" ) _UpperCAmelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _UpperCAmelCase = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6 _UpperCAmelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) _UpperCAmelCase = transforms(_A ).unsqueeze(0 ) if config.num_channels == 1: _UpperCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 ) _UpperCAmelCase = model(_A ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _UpperCAmelCase = torch.Size([1, 3, 5_1_2, 5_1_2] ) _UpperCAmelCase = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _UpperCAmelCase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) _UpperCAmelCase = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _UpperCAmelCase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) _UpperCAmelCase = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _UpperCAmelCase = torch.Size([1, 3, 5_1_2, 5_1_2] ) _UpperCAmelCase = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _UpperCAmelCase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) _UpperCAmelCase = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) 
assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _A , atol=1e-3 ) print("""Looks ok!""" ) _UpperCAmelCase = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } _UpperCAmelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_A ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') a : str = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
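The conversion above splits each fused `qkv` projection by slicing along the first dimension. A small sketch of that slicing on a toy tensor (the [q; k; v] stacking of size 3*dim is taken from the code above; the sizes are made up):

import torch

dim = 4  # toy embedding size; the real script uses config.embed_dim
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]

# the three slices reassemble exactly into the fused matrix
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)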
555
0
"""simple docstring""" from __future__ import annotations _lowerCAmelCase = 1.6_021E-19 # units = C def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
16
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X20000 and cp <= 0X2A6DF) # or (cp >= 0X2A700 and cp <= 0X2B73F) # or (cp >= 0X2B740 and cp <= 0X2B81F) # or (cp >= 0X2B820 and cp <= 0X2CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2F800 and cp <= 0X2FA1F) # ): # return True return False def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' for char in word: _lowerCAmelCase : Dict = ord(_lowerCamelCase ) if not _is_chinese_char(_lowerCamelCase ): return 0 return 1 def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Tuple = set() for token in tokens: _lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase ) if chinese_word: word_set.add(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase ) return word_list def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if not chinese_word_set: return bert_tokens _lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] ) _lowerCAmelCase : str = bert_tokens _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase ) while start < end: _lowerCAmelCase : Dict = True if is_chinese(bert_word[start] ): _lowerCAmelCase : str = min(end - start , _lowerCamelCase ) for i in range(_lowerCamelCase , 1 , -1 ): _lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _lowerCAmelCase : Tuple = '##' + bert_word[j] _lowerCAmelCase : Optional[int] = start + i _lowerCAmelCase : Any = False break if single_word: start += 1 return bert_word def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): _lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0] _lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res] ltp_res.extend(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) _lowerCAmelCase : int = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): _lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Optional[int] = [] for id in input_ids: _lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase ) input_tokens.append(_lowerCamelCase ) _lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : List[str] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCamelCase ): if token[:2] == "##": _lowerCAmelCase : List[Any] = token[2:] # save chinese tokens' pos if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ): ref_id.append(_lowerCamelCase ) ref_ids.append(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) return ref_ids def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' with open(args.file_name , 'r' , encoding='utf-8' ) as f: _lowerCAmelCase : int = f.readlines() _lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device _lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert ) _lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: _lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids] f.writelines(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") _lowerCAmelCase = parser.parse_args() main(args)
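The CJK range test used above can be exercised directly on code points; a reduced sketch covering just the two main blocks (the full script checks several more extension ranges):

def _is_chinese_char(cp: int) -> bool:
    # Main CJK Unified Ideographs block plus Extension A, as in the script above.
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF)

print(_is_chinese_char(ord("中")))  # True
print(_is_chinese_char(ord("a")))   # False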
16
1
'''simple docstring''' # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( 'pipelines_utils', '0.22.0', 'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.', standard_warn=False, stacklevel=3, )
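A minimal stand-in for the same shim pattern using only the standard library (the `deprecate` helper above is diffusers-specific; this sketch merely mimics its warning behavior):

import warnings

warnings.warn(
    "Importing from this module is deprecated; import from the new location instead.",
    FutureWarning,
    stacklevel=3,  # point the warning at the caller's import site
)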
541
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def __UpperCAmelCase ( A : Optional[int] ) -> List[Any]: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E_00 and cp <= 0X9F_FF) or (cp >= 0X34_00 and cp <= 0X4D_BF) # or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) # or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) # or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) # or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) # or (cp >= 0XF9_00 and cp <= 0XFA_FF) or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) # ): # return True return False def __UpperCAmelCase ( A : str ) -> Optional[Any]: # word like '180' or '身高' or '神' for char in word: UpperCAmelCase_ : str = ord(A ) if not _is_chinese_char(A ): return 0 return 1 def __UpperCAmelCase ( A : List[str] ) -> Dict: UpperCAmelCase_ : Optional[Any] = set() for token in tokens: UpperCAmelCase_ : str = len(A ) > 1 and is_chinese(A ) if chinese_word: word_set.add(A ) UpperCAmelCase_ : Optional[int] = list(A ) return word_list def __UpperCAmelCase ( A : List[str] , A : set() ) -> Optional[Any]: if not chinese_word_set: return bert_tokens UpperCAmelCase_ : Dict = max([len(A ) for w in chinese_word_set] ) UpperCAmelCase_ : List[str] = bert_tokens UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = 0, len(A ) while start < end: UpperCAmelCase_ : str = True if is_chinese(bert_word[start] ): UpperCAmelCase_ : str = min(end - start , A ) for i in range(A , 1 , -1 ): UpperCAmelCase_ : Tuple = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCAmelCase_ : Union[str, Any] = '''##''' + bert_word[j] UpperCAmelCase_ : Any = start + i UpperCAmelCase_ : Optional[int] = False break if single_word: start += 1 return bert_word def __UpperCAmelCase ( A : List[str] , A : LTP , A : BertTokenizer ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = [] for i in range(0 , len(A ) , 1_0_0 ): UpperCAmelCase_ : int = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['''cws'''] ).cws UpperCAmelCase_ : Any = [get_chinese_word(A ) for r in res] ltp_res.extend(A ) assert len(A ) == len(A ) UpperCAmelCase_ : Tuple = [] for i in range(0 , len(A ) , 1_0_0 ): UpperCAmelCase_ : Optional[int] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=A , truncation=A , max_length=5_1_2 ) bert_res.extend(res['''input_ids'''] ) assert len(A ) == len(A ) UpperCAmelCase_ : Any = [] for input_ids, chinese_word in zip(A , A ): UpperCAmelCase_ : Union[str, Any] = [] for id in input_ids: UpperCAmelCase_ : Union[str, Any] = bert_tokenizer._convert_id_to_token(A ) input_tokens.append(A ) UpperCAmelCase_ : List[str] = add_sub_symbol(A , A ) UpperCAmelCase_ : Any = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(A ): if token[:2] == "##": UpperCAmelCase_ : int = token[2:] # save chinese tokens' pos if len(A ) == 1 and _is_chinese_char(ord(A ) ): ref_id.append(A ) ref_ids.append(A ) assert len(A ) == len(A ) return ref_ids def __UpperCAmelCase ( A : List[Any] ) -> Tuple: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = f.readlines() UpperCAmelCase_ : List[str] = [line.strip() for line in data if len(A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCAmelCase_ : Tuple = LTP(args.ltp ) # faster in GPU device UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) UpperCAmelCase_ : Optional[int] = prepare_ref(A , A , A ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : str = [json.dumps(A ) + '''\n''' for ref in ref_ids] f.writelines(A ) if __name__ == "__main__": _UpperCamelCase : Optional[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) _UpperCamelCase : Any = parser.parse_args() main(args)
541
1
import os def UpperCAmelCase_ ( ): '''simple docstring''' script_dir = os.path.dirname(os.path.realpath(__file__ ) ) triangle_path = os.path.join(script_dir , '''triangle.txt''' ) with open(triangle_path ) as f: triangle = f.readlines() a = [] for line in triangle: numbers_from_line = [] for number in line.strip().split(''' ''' ): numbers_from_line.append(int(number ) ) a.append(numbers_from_line ) for i in range(1 , len(a ) ): for j in range(len(a[i] ) ): number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0 number_b = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(number_a , number_b ) return max(a[-1] ) if __name__ == "__main__": print(UpperCAmelCase_())
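The same bottom-up accumulation works on an inline triangle without the data file; a small demo on a classic 4-row example:

triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]

# each cell absorbs the better of its two parents from the row above
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        above_left = triangle[i - 1][j - 1] if j > 0 else 0
        triangle[i][j] += max(above_left, above_right)

print(max(triangle[-1]))  # 23 for this classic example (3 -> 7 -> 4 -> 9)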
702
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : int=32 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : List[Any]=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : int=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]="relu" , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[Any]=1 , ) -> List[str]: SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = embeddings_size SCREAMING_SNAKE_CASE__ = hidden_sizes SCREAMING_SNAKE_CASE__ = depths SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = out_features SCREAMING_SNAKE_CASE__ = out_indices SCREAMING_SNAKE_CASE__ = num_groups def lowercase_ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, pixel_values, labels def lowercase_ ( self : List[str] ) -> Dict: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase_ ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , labels=__lowerCamelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase_ ( self : Tuple ) -> int: SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = config_and_inputs SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" a = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False a = False def lowercase_ ( self : Optional[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ = BitModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def lowercase_ ( self : Union[str, Any] ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self : str ) -> Dict: return @unittest.skip(reason='''Bit does not output attentions''' ) def lowercase_ ( self : Any ) -> Dict: pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def lowercase_ ( self : Tuple ) -> Optional[int]: pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def lowercase_ ( self : Optional[int] ) -> str: pass def lowercase_ ( self : str ) -> str: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ = ['''pixel_values'''] 
self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowercase_ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowercase_ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def lowercase_ ( self : Any ) -> str: SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def lowercase_ ( self : str ) -> Optional[Any]: def check_hidden_states_output(__lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ = layer_type SCREAMING_SNAKE_CASE__ = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def lowercase_ ( self : List[str] ) -> Dict: pass def lowercase_ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def lowercase_ ( self : Optional[Any] ) -> Dict: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self : List[Any] ) -> List[Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase_ ( self : str ) -> List[Any]: SCREAMING_SNAKE_CASE__ = 
BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = self.default_image_processor SCREAMING_SNAKE_CASE__ = prepare_img() SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) @require_torch class UpperCAmelCase__ ( A__ , unittest.TestCase ): """simple docstring""" a = (BitBackbone,) if is_torch_available() else () a = BitConfig a = False def lowercase_ ( self : Optional[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ = BitModelTester(self )
472
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ : Tuple =logging.get_logger(__name__) A_ : Dict ={ '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __UpperCAmelCase ( __a ): __A : List[Any] = 'deformable_detr' __A : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.25 , _lowerCamelCase=False , **_lowerCamelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): lowerCAmelCase_ = backbone_config.get('''model_type''' ) lowerCAmelCase_ = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ = config_class.from_dict(_lowerCamelCase ) lowerCAmelCase_ = use_timm_backbone lowerCAmelCase_ = backbone_config lowerCAmelCase_ = num_channels lowerCAmelCase_ = num_queries lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = d_model lowerCAmelCase_ = encoder_ffn_dim lowerCAmelCase_ = encoder_layers lowerCAmelCase_ = encoder_attention_heads lowerCAmelCase_ = decoder_ffn_dim lowerCAmelCase_ = decoder_layers lowerCAmelCase_ = decoder_attention_heads lowerCAmelCase_ = dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = activation_function lowerCAmelCase_ = init_std lowerCAmelCase_ = init_xavier_std lowerCAmelCase_ = encoder_layerdrop lowerCAmelCase_ = auxiliary_loss lowerCAmelCase_ = position_embedding_type lowerCAmelCase_ = backbone lowerCAmelCase_ = use_pretrained_backbone lowerCAmelCase_ = dilation # deformable attributes lowerCAmelCase_ = num_feature_levels lowerCAmelCase_ = encoder_n_points lowerCAmelCase_ = decoder_n_points lowerCAmelCase_ = two_stage lowerCAmelCase_ = two_stage_num_proposals lowerCAmelCase_ = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowerCAmelCase_ = class_cost lowerCAmelCase_ = bbox_cost lowerCAmelCase_ = giou_cost # Loss coefficients lowerCAmelCase_ = mask_loss_coefficient lowerCAmelCase_ = dice_loss_coefficient lowerCAmelCase_ = bbox_loss_coefficient lowerCAmelCase_ = giou_loss_coefficient lowerCAmelCase_ = eos_coefficient lowerCAmelCase_ = focal_alpha lowerCAmelCase_ = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def UpperCAmelCase_ ( self ): return self.encoder_attention_heads @property def UpperCAmelCase_ ( self ): return self.d_model def UpperCAmelCase_ ( self ): lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCAmelCase_ = self.backbone_config.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
274
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def snake_case_ ( __snake_case : Tuple=None , __snake_case : int=None) -> int: return field(default_factory=lambda: default , metadata=__snake_case) @dataclass class __UpperCAmelCase : __A : str = field( metadata={'help': 'The csv file to plot.'} , ) __A : bool = field( default=__a , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) __A : bool = field( default=__a , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) __A : bool = field( default=__a , metadata={'help': 'Disable logarithmic scale when plotting'} , ) __A : bool = field( default=__a , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) __A : Optional[str] = field( default=__a , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) __A : Optional[List[str]] = list_field( default=__a , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def snake_case_ ( __snake_case : Optional[Any]) -> Dict: try: int(__snake_case) return True except ValueError: return False def snake_case_ ( __snake_case : Dict) -> int: try: float(__snake_case) return True except ValueError: return False class __UpperCAmelCase : def __init__( self , _lowerCamelCase ): lowerCAmelCase_ = args lowerCAmelCase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: lowerCAmelCase_ = csv.DictReader(_lowerCamelCase ) for row in reader: lowerCAmelCase_ = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None lowerCAmelCase_ = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None lowerCAmelCase_ = float(row['''result'''] ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ ,lowerCAmelCase_ = plt.subplots() lowerCAmelCase_ = '''Time usage''' if self.args.is_time else '''Memory usage''' lowerCAmelCase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) lowerCAmelCase_ = self.result_dict[model_name]['''result'''] ((lowerCAmelCase_) ,(lowerCAmelCase_)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) lowerCAmelCase_ = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: lowerCAmelCase_ = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCamelCase , ) else: 
lowerCAmelCase_ = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((lowerCAmelCase_) ,(lowerCAmelCase_)) = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) lowerCAmelCase_ = np.asarray(_lowerCamelCase , _lowerCamelCase )[: len(_lowerCamelCase )] plt.scatter( _lowerCamelCase , _lowerCamelCase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(_lowerCamelCase , _lowerCamelCase , '''--''' ) title_str += F''' {label_model_name} vs.''' lowerCAmelCase_ = title_str[:-4] lowerCAmelCase_ = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(_lowerCamelCase ) plt.xlabel(_lowerCamelCase ) plt.ylabel(_lowerCamelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def snake_case_ ( ) -> Tuple: lowerCAmelCase_ = HfArgumentParser(__snake_case) lowerCAmelCase_ = parser.parse_args_into_dataclasses()[0] lowerCAmelCase_ = Plot(args=__snake_case) plot.plot() if __name__ == "__main__": main()
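The reader above groups benchmark rows per model with a `defaultdict`; a self-contained sketch of that aggregation on in-memory rows (column names mirror the script, the values are invented):

import csv
import io
from collections import defaultdict

csv_text = """model,batch_size,sequence_length,result
bert-base,8,128,0.031
bert-base,8,256,0.059
"""

result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
for row in csv.DictReader(io.StringIO(csv_text)):
    name = row["model"]
    bsz, seq_len = int(row["batch_size"]), int(row["sequence_length"])
    result_dict[name]["bsz"].append(bsz)
    result_dict[name]["seq_len"].append(seq_len)
    result_dict[name]["result"][(bsz, seq_len)] = float(row["result"])

print(result_dict["bert-base"]["result"][(8, 128)])  # 0.031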
274
1
import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def snake_case_ ( *a__ , **a__): pass def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] )-> List[Any]: A__ = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCAmelCase__ ( UpperCamelCase_ : int )-> List[Any]: A__ = np.array(snake_case_ ) A__ = npimg.shape return {"hash": hashimage(snake_case_ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase ): UpperCamelCase__ = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) UpperCamelCase__ = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def snake_case_ ( self , a__ , a__ , a__): A__ = MaskGenerationPipeline(model=_a , image_processor=_a) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def snake_case_ ( self , a__ , a__): pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''') def snake_case_ ( self): pass @slow @require_torch def snake_case_ ( self): A__ = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''') A__ = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_5_6) # Shortening by hashing A__ = [] for i, o in enumerate(outputs['''masks''']): new_outupt += [{"mask": mask_to_test_readable(_a), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_a , decimals=4) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_6_7}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_3}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_0_9}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_7_9}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_3_4}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_7_1_6}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_6_1_2}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_9_9}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_5_2}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_3_2}, {'''mask''': {'''hash''': '''32de6454a8''', 
'''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_1_6}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_9_9}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_8_3}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_6_4}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_0_8}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_3_5}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_2_6}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_2_6_2}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_9_9}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_6}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_4}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_3}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def snake_case_ ( self): A__ = """facebook/sam-vit-huge""" A__ = pipeline('''mask-generation''' , model=_a) A__ = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_5_6) # Shortening by hashing A__ = [] for i, o in enumerate(outputs['''masks''']): new_outupt += [{"mask": mask_to_test_readable(_a), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_a , decimals=4) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1_0}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3}, ] , )
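The test shortens mask outputs by hashing their raw bytes to a 10-character digest; a hedged sketch of the same fingerprinting idea (MD5 here is for cheap comparison only, not security):

import hashlib
import numpy as np

def short_hash(arr: np.ndarray) -> str:
    # First 10 hex chars of the MD5 of the raw buffer: enough to compare outputs.
    return hashlib.md5(arr.tobytes()).hexdigest()[:10]

mask = np.zeros((480, 640), dtype=np.uint8)
print(short_hash(mask), mask.shape)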
710
def lowerCAmelCase__ ( stra : str , strb : str )-> float: def get_matched_characters(_stra : str , _strb : str ) -> str: matched = [] limit = min(len(_stra ) , len(_strb ) ) // 2 for i, l in enumerate(_stra ): left = int(max(0 , i - limit ) ) right = int(min(i + limit + 1 , len(_strb ) ) ) if l in _strb[left:right]: matched.append(l ) _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}" return "".join(matched ) # matching characters matching_a = get_matched_characters(stra , strb ) matching_b = get_matched_characters(strb , stra ) match_count = len(matching_a ) # transposition transpositions = ( len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2 ) if not match_count: jaro = 0.0 else: jaro = ( 1 / 3 * ( match_count / len(stra ) + match_count / len(strb ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters prefix_len = 0 for ca, cb in zip(stra[:4] , strb[:4] ): if ca == cb: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(lowerCAmelCase__("hello", "world"))
526
0
from __future__ import annotations def lowerCamelCase__ ( maze : list[list[int]] ): """simple docstring""" size = len(maze ) # We need to create solution object to save path. solutions = [[0 for _ in range(size )] for _ in range(size )] solved = run_maze(maze , 0 , 0 , solutions ) if solved: print("\n".join(str(row ) for row in solutions ) ) else: print("No solution exists!" ) return solved def run_maze ( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ): """simple docstring""" size = len(maze ) # Final check point. if i == j == (size - 1): solutions[i][j] = 1 return True lower_flag = (not i < 0) and (not j < 0) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. block_flag = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited solutions[i][j] = 1 # check for directions if ( run_maze(maze , i + 1 , j , solutions ) or run_maze(maze , i , j + 1 , solutions ) or run_maze(maze , i - 1 , j , solutions ) or run_maze(maze , i , j - 1 , solutions ) ): return True solutions[i][j] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
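A compact, self-contained usage sketch of the same backtracking idea on a hand-made 3x3 grid, where 0 is an open cell and 1 is a wall:

def solve(maze):
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def walk(i, j):
        if i == j == n - 1:          # reached the bottom-right corner
            path[i][j] = 1
            return True
        if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
            path[i][j] = 1           # mark, try all four directions, unmark on failure
            if any(walk(a, b) for a, b in ((i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1))):
                return True
            path[i][j] = 0
        return False

    return path if walk(0, 0) else None

print(solve([[0, 1, 0], [0, 0, 0], [1, 0, 0]]))
# [[1, 0, 0], [1, 1, 0], [0, 1, 1]]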
290
'''simple docstring''' def _modexpt ( base , exponent , modulo_value ): if exponent == 1: return base if exponent % 2 == 0: x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value def solution ( base = 1_7_7_7 , height = 1_8_5_5 , digits = 8 ): result = base for _ in range(1 , height ): result = _modexpt(base , result , 1_0**digits ) return result if __name__ == "__main__": print(f"""{solution() = }""")
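Python's built-in three-argument `pow` computes the same modular power, which gives an easy cross-check for the recursive helper above:

def modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # Exponentiation by squaring, reduced mod modulo_value at each step.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * modexpt(base, exponent - 1, modulo_value)) % modulo_value

assert modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)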
407
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration A = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] A = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] A = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) A = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) A = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a(lowercase__ , lowercase__ ): '''simple docstring''' for tf_name, hf_name in patterns: snake_case_ = k.replace(lowercase__ , lowercase__ ) return k def a(lowercase__ , lowercase__ ): '''simple docstring''' snake_case_ = BigBirdPegasusConfig(**lowercase__ ) snake_case_ = BigBirdPegasusForConditionalGeneration(lowercase__ ) snake_case_ = torch_model.state_dict() snake_case_ = {} # separating decoder weights snake_case_ = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} snake_case_ = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): snake_case_ = [k.endswith(lowercase__ ) for ending in KEYS_TO_IGNORE] if any(lowercase__ ): continue snake_case_ = DECODER_PATTERNS snake_case_ = rename_state_dict_key(lowercase__ , lowercase__ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): snake_case_ = v.T snake_case_ = torch.from_numpy(lowercase__ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): snake_case_ = [k.endswith(lowercase__ ) for ending in KEYS_TO_IGNORE] if any(lowercase__ ): continue snake_case_ = REMAINING_PATTERNS snake_case_ = rename_state_dict_key(lowercase__ , lowercase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): snake_case_ = v.T snake_case_ = torch.from_numpy(lowercase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" snake_case_ = mapping['model.embed_positions.weight'] snake_case_ = mapping.pop('model.embed_positions.weight' ) snake_case_ , snake_case_ = torch_model.load_state_dict(lowercase__ , strict=lowercase__ ) snake_case_ = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def a(lowercase__ ): '''simple docstring''' snake_case_ = tf.train.list_variables(lowercase__ ) snake_case_ = {} snake_case_ = ['global_step'] for name, shape in tqdm(lowercase__ , desc='converting tf checkpoint to dict' ): snake_case_ = any(pat in name for pat in ignore_name ) if skip_key: continue snake_case_ = tf.train.load_variable(lowercase__ , lowercase__ ) snake_case_ = array return tf_weights def a(lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' snake_case_ = get_tf_weights_as_numpy(lowercase__ ) snake_case_ = convert_bigbird_pegasus(lowercase__ , lowercase__ ) torch_model.save_pretrained(lowercase__ ) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') A = parser.parse_args() A = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
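The pattern-pair renaming above is just repeated `str.replace`; a tiny sketch with made-up patterns (the real tables are the constants at the top of the script):

# Illustrative (tf_name, hf_name) pairs; not the script's actual tables.
PATTERNS = [("/", "."), ("kernel", "weight"), ("layer_", "layers.")]

def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_state_dict_key("encoder/layer_0/kernel", PATTERNS))
# encoder.layers.0.weight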
46
def a(number , iterations ): '''simple docstring''' if not isinstance(iterations , int ): raise ValueError('iterations must be defined as integers' ) if not isinstance(number , int ) or not number >= 1: raise ValueError('starting number must be an integer and be more than 0' ) if not iterations >= 1: raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' ) out = '' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(number ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
46
1
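A usage sketch for the fizz_buzz routine above (parameter names as restored there):

print(fizz_buzz(number=1, iterations=7))  # -> "1 2 Fizz 4 Buzz Fizz 7 "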
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _snake_case : Dict = logging.get_logger() def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : LevitConfig, lowerCAmelCase_ : Path, lowerCAmelCase_ : bool = True ): print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __lowerCAmelCase = timm.create_model('levit_128s', pretrained=lowerCAmelCase_ ) else: __lowerCAmelCase = timm.create_model('levit_128', pretrained=lowerCAmelCase_ ) if hidden_sizes == 192: __lowerCAmelCase = timm.create_model('levit_192', pretrained=lowerCAmelCase_ ) if hidden_sizes == 256: __lowerCAmelCase = timm.create_model('levit_256', pretrained=lowerCAmelCase_ ) if hidden_sizes == 384: __lowerCAmelCase = timm.create_model('levit_384', pretrained=lowerCAmelCase_ ) from_model.eval() __lowerCAmelCase = LevitForImageClassificationWithTeacher(lowerCAmelCase_ ).eval() __lowerCAmelCase = OrderedDict() __lowerCAmelCase = from_model.state_dict() __lowerCAmelCase = list(from_model.state_dict().keys() ) __lowerCAmelCase = list(our_model.state_dict().keys() ) print(len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) ) for i in range(len(lowerCAmelCase_ ) ): __lowerCAmelCase = weights[og_keys[i]] our_model.load_state_dict(lowerCAmelCase_ ) __lowerCAmelCase = torch.randn((2, 3, 224, 224) ) __lowerCAmelCase = from_model(lowerCAmelCase_ ) __lowerCAmelCase = our_model(lowerCAmelCase_ ).logits assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one." 
__lowerCAmelCase = name print(lowerCAmelCase_ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __lowerCAmelCase = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def a_ ( lowerCAmelCase_ : Path, lowerCAmelCase_ : str = None, lowerCAmelCase_ : bool = True ): __lowerCAmelCase = 'imagenet-1k-id2label.json' __lowerCAmelCase = 1000 __lowerCAmelCase = (1, num_labels) __lowerCAmelCase = 'huggingface/label-files' __lowerCAmelCase = num_labels __lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) __lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} __lowerCAmelCase = partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) __lowerCAmelCase = { 'levit-128S': 128, 'levit-128': 128, 'levit-192': 192, 'levit-256': 256, 'levit-384': 384, } __lowerCAmelCase = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) return config, expected_shape if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) _snake_case : List[Any] = parser.parse_args() _snake_case : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
53
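The LeViT conversion above copies weights purely by position, which deserves to be made explicit. A minimal sketch of that step (the helper name here is mine, not from the script); it only works when both models enumerate their parameters in the same order:

from collections import OrderedDict

def copy_state_dict_by_position(src_state_dict, dst_keys):
    # Pair the i-th source tensor with the i-th destination key.
    new_state_dict = OrderedDict()
    for dst_key, src_key in zip(dst_keys, src_state_dict.keys()):
        new_state_dict[dst_key] = src_state_dict[src_key]
    return new_state_dict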
"""simple docstring""" from collections import defaultdict class __UpperCAmelCase : '''simple docstring''' def __init__( self , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Tuple = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 A__ : Optional[int] = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(snake_case_ ) ) ] A__ : Optional[int] = defaultdict(snake_case_ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 A__ : Optional[int] = (1 << len(snake_case_ )) - 1 def lowerCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement A__ : Optional[int] = self.count_ways_until(snake_case_ , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. A__ : List[str] = total_ways_util return self.dp[mask][task_no] def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' for i in range(len(snake_case_ ) ): for j in task_performed[i]: self.task[j].append(snake_case_ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": _UpperCamelCase = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. _UpperCamelCase = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
363
0
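For small inputs the bitmask DP above can be cross-checked by brute force; this sketch (function name is mine) enumerates ordered assignments of distinct tasks to the persons:

from itertools import permutations

def count_ways_bruteforce(task_performed, total_tasks):
    people = len(task_performed)
    count = 0
    for assignment in permutations(range(1, total_tasks + 1), people):
        if all(assignment[p] in task_performed[p] for p in range(people)):
            count += 1
    return count

print(count_ways_bruteforce([[1, 3, 4], [1, 2, 5], [3, 4]], 5))  # 10, matching the DP demo above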
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : Optional[int] = BloomTokenizerFast SCREAMING_SNAKE_CASE__ : Dict = BloomTokenizerFast SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = '''tokenizer_file''' SCREAMING_SNAKE_CASE__ : List[Any] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def __magic_name__( self :int ) -> Dict: super().setUp() __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :List[str] ) -> str: kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Tuple = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : List[Any] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] __SCREAMING_SNAKE_CASE : Tuple = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]] __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_encode_plus(lowerCAmelCase__ )['''input_ids'''] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[str] , lowerCAmelCase__ :List[str]=6 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __SCREAMING_SNAKE_CASE : List[Any] = '''This is a simple input''' __SCREAMING_SNAKE_CASE : str = ['''This is a simple input 1''', '''This is a simple input 2'''] __SCREAMING_SNAKE_CASE : Optional[Any] = ('''This is a simple input''', '''This is a pair''') __SCREAMING_SNAKE_CASE : Union[str, Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) tokenizer_r.encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) tokenizer_r.batch_encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) tokenizer_r.encode(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) tokenizer_r.batch_encode_plus(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) __SCREAMING_SNAKE_CASE : Dict = None # Hotfixing padding = None self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( lowerCAmelCase__ , 
tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , ) def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = next(iter(lowerCAmelCase__ ) )['''premise'''] # pick up one data __SCREAMING_SNAKE_CASE : Tuple = list(sample_data.values() ) __SCREAMING_SNAKE_CASE : Optional[int] = list(map(tokenizer.encode , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Any = [tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) for x in output_tokens] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> List[Any]: # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
713
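A minimal sketch of the encode/decode round-trip property the Bloom tokenizer test above relies on (this assumes network access to download the checkpoint, and plain ASCII text with no special tokens):

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tokenizer("The quick brown fox")["input_ids"]
assert tokenizer.decode(ids) == "The quick brown fox"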
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCAmelCase : Optional[int] =logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED __lowerCAmelCase : str ={ 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } __lowerCAmelCase : Optional[int] ={ 'allenai/led-base-16384': 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Any = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __SCREAMING_SNAKE_CASE : Any = bs[:] __SCREAMING_SNAKE_CASE : List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase__ ) cs.append(2**8 + n ) n += 1 __SCREAMING_SNAKE_CASE : List[str] = [chr(lowercase__ ) for n in cs] return dict(zip(lowercase__ , lowercase__ ) ) def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : int = set() __SCREAMING_SNAKE_CASE : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = char return pairs class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple="replace" , lowerCAmelCase__ :List[str]="<s>" , lowerCAmelCase__ :List[str]="</s>" , lowerCAmelCase__ :Any="</s>" , lowerCAmelCase__ :int="<s>" , lowerCAmelCase__ :List[str]="<unk>" , lowerCAmelCase__ :List[str]="<pad>" , lowerCAmelCase__ :Dict="<mask>" , lowerCAmelCase__ :Union[str, Any]=False , **lowerCAmelCase__ :int , ) -> str: __SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token __SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token __SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token __SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token __SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token __SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , 
lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __SCREAMING_SNAKE_CASE : int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , ) with open(lowerCAmelCase__ , encoding='''utf-8''' ) as vocab_handle: __SCREAMING_SNAKE_CASE : List[Any] = json.load(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.encoder.items()} __SCREAMING_SNAKE_CASE : int = errors # how to handle errors in decoding __SCREAMING_SNAKE_CASE : List[Any] = bytes_to_unicode() __SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase__ , encoding='''utf-8''' ) as merges_handle: __SCREAMING_SNAKE_CASE : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1] __SCREAMING_SNAKE_CASE : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges] __SCREAMING_SNAKE_CASE : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE : Optional[int] = {} __SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def __magic_name__( self :Optional[Any] ) -> Dict: return len(self.encoder ) def __magic_name__( self :Union[str, Any] ) -> Any: return dict(self.encoder , **self.added_tokens_encoder ) def __magic_name__( self :Dict , lowerCAmelCase__ :Any ) -> Tuple: if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE : List[Any] = tuple(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = get_pairs(lowerCAmelCase__ ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = bigram __SCREAMING_SNAKE_CASE : Tuple = [] __SCREAMING_SNAKE_CASE : Optional[Any] = 0 while i < len(lowerCAmelCase__ ): try: __SCREAMING_SNAKE_CASE : int = word.index(lowerCAmelCase__ , lowerCAmelCase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE : Optional[Any] = j if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE : List[str] = tuple(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = new_word if len(lowerCAmelCase__ ) == 1: break else: __SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = ''' '''.join(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = word return word def __magic_name__( self :str , lowerCAmelCase__ :Dict ) -> List[str]: __SCREAMING_SNAKE_CASE : List[Any] = [] for token 
in re.findall(self.pat , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(''' ''' ) ) return bpe_tokens def __magic_name__( self :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]: return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) ) def __magic_name__( self :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]: return self.decoder.get(lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = ''''''.join(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def __magic_name__( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : Any = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Optional[int] = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + '''\n''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __SCREAMING_SNAKE_CASE : str = token_index writer.write(''' '''.join(lowerCAmelCase__ ) + '''\n''' ) index += 1 return vocab_file, merge_file def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] __SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __magic_name__( self :Any , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1] def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def 
__magic_name__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=False , **lowerCAmelCase__ :Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()): __SCREAMING_SNAKE_CASE : str = ''' ''' + text return (text, kwargs) def __magic_name__( self :List[Any] , lowerCAmelCase__ :Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , ) -> dict: __SCREAMING_SNAKE_CASE : Tuple = super()._pad( encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) # Load from model defaults if return_attention_mask is None: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __SCREAMING_SNAKE_CASE : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. __SCREAMING_SNAKE_CASE : Dict = len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCAmelCase__ ) if needs_to_be_padded: __SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase__ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __SCREAMING_SNAKE_CASE : int = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": __SCREAMING_SNAKE_CASE : Tuple = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
260
0
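The second module-level helper in the LED tokenizer above (get_pairs upstream; its name is mangled in this row) collects the set of adjacent symbol pairs that BPE considers for merging. A self-contained sketch:

def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(tuple("low")) == {("l", "o"), ("o", "w")}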
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _lowercase : List[str] =logging.get_logger(__name__) # pylint: disable=invalid-name _lowercase : Tuple ='\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowerCAmelCase_ ( _lowercase : Any , _lowercase : Dict , _lowercase : Any=8) -> List[str]: """simple docstring""" a__ : Tuple = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a__ : Dict = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowerCAmelCase_ ( _lowercase : Dict , _lowercase : Tuple=512 , _lowercase : str=512) -> Tuple: """simple docstring""" a__ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1) a__ : List[str] = np.array(pil_image.convert("""RGB""")) a__ : List[Any] = arr.astype(np.floataa) / 127.5 - 1 a__ : Union[str, Any] = np.transpose(_lowercase , [2, 0, 1]) a__ : Optional[int] = torch.from_numpy(_lowercase).unsqueeze(0) return image class snake_case__ (_a ): """simple docstring""" def __init__( self , __lowercase , __lowercase , __lowercase , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , ) a__ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: """simple docstring""" a__ : Optional[int] = min(int(num_inference_steps * strength ) , lowercase__ ) a__ : Any = max(num_inference_steps - init_timestep , 0 ) a__ : Dict = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Optional[Any]: """simple docstring""" if not isinstance(lowercase__ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase__ )}''' ) a__ : Dict = image.to(device=lowercase__ , dtype=lowercase__ ) a__ : Union[str, Any] = batch_size * 
num_images_per_prompt if image.shape[1] == 4: a__ : Any = image else: if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowercase__ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(lowercase__ , lowercase__ ): a__ : Any = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase__ ) ] a__ : List[str] = torch.cat(lowercase__ , dim=0 ) else: a__ : Tuple = self.movq.encode(lowercase__ ).latent_dist.sample(lowercase__ ) a__ : Optional[int] = self.movq.config.scaling_factor * init_latents a__ : Any = torch.cat([init_latents] , dim=0 ) a__ : List[str] = init_latents.shape a__ : Optional[Any] = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ ) # get latents a__ : Any = self.scheduler.add_noise(lowercase__ , lowercase__ , lowercase__ ) a__ : Tuple = init_latents return latents def SCREAMING_SNAKE_CASE__( self , __lowercase=0 ) -> str: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) a__ : Dict = torch.device(F'''cuda:{gpu_id}''' ) a__ : List[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase__ , lowercase__ ) def SCREAMING_SNAKE_CASE__( self , __lowercase=0 ) -> List[str]: """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) a__ : Optional[Any] = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowercase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a__ : Optional[int] = None for cpu_offloaded_model in [self.unet, self.movq]: a__ , a__ : Tuple = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__ ) # We'll offload the last model manually. 
a__ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase__ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase__ ) def __call__( self , __lowercase , __lowercase , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 1_0_0 , __lowercase = 4.0 , __lowercase = 0.3 , __lowercase = 1 , __lowercase = None , __lowercase = "pil" , __lowercase = True , ) -> str: """simple docstring""" a__ : Any = self._execution_device a__ : Tuple = guidance_scale > 1.0 if isinstance(lowercase__ , lowercase__ ): a__ : List[str] = torch.cat(lowercase__ , dim=0 ) a__ : List[Any] = image_embeds.shape[0] if isinstance(lowercase__ , lowercase__ ): a__ : Optional[int] = torch.cat(lowercase__ , dim=0 ) if do_classifier_free_guidance: a__ : Dict = image_embeds.repeat_interleave(lowercase__ , dim=0 ) a__ : List[Any] = negative_image_embeds.repeat_interleave(lowercase__ , dim=0 ) a__ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase__ ) if not isinstance(lowercase__ , lowercase__ ): a__ : Any = [image] if not all(isinstance(lowercase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'''Input is in incorrect format: {[type(lowercase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' ) a__ : List[Any] = torch.cat([prepare_image(lowercase__ , lowercase__ , lowercase__ ) for i in image] , dim=0 ) a__ : Optional[Any] = image.to(dtype=image_embeds.dtype , device=lowercase__ ) a__ : Dict = self.movq.encode(lowercase__ )["""latents"""] a__ : Any = latents.repeat_interleave(lowercase__ , dim=0 ) self.scheduler.set_timesteps(lowercase__ , device=lowercase__ ) a__ , a__ : Dict = self.get_timesteps(lowercase__ , lowercase__ , lowercase__ ) a__ : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) a__ , a__ : Any = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor ) a__ : Dict = self.prepare_latents( lowercase__ , lowercase__ , lowercase__ , lowercase__ , image_embeds.dtype , lowercase__ , lowercase__ ) for i, t in enumerate(self.progress_bar(lowercase__ ) ): # expand the latents if we are doing classifier free guidance a__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a__ : Any = {"""image_embeds""": image_embeds} a__ : Optional[int] = self.unet( sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0] if do_classifier_free_guidance: a__ , a__ : Any = noise_pred.split(latents.shape[1] , dim=1 ) a__ , a__ : List[Any] = noise_pred.chunk(2 ) a__ , a__ : List[str] = variance_pred.chunk(2 ) a__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a__ : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a__ , a__ : Any = noise_pred.split(latents.shape[1] , dim=1 ) # compute 
the previous noisy sample x_t -> x_t-1 a__ : List[Any] = self.scheduler.step( lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0] # post-processing a__ : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a__ : List[str] = image * 0.5 + 0.5 a__ : Any = image.clamp(0 , 1 ) a__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a__ : Tuple = self.numpy_to_pil(lowercase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase__ )
136
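The first helper in the Kandinsky pipeline above (downscale_height_and_width upstream; obfuscated in this row) maps an image size to a latent size. A self-contained sketch of its rounding rule, assuming the usual MoVQ scale factor of 8:

def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    # Round each dimension up to a multiple of scale_factor**2, then report it
    # in units of scale_factor (the VQ latent resolution).
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(700, 700) == (88, 88)  # 700 rounds up to 704 = 11 * 64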
from __future__ import annotations


def p_series(nth_term: int, power: int) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
303
0
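A usage sketch for p_series above:

print(p_series(nth_term=3, power=2))  # ['1', '1 / 4', '1 / 9']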
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
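A usage sketch for the automaton above, on the classic Aho-Corasick example (dict ordering may vary):

automaton = Automaton(["he", "she", "his", "hers"])
print(automaton.search_in("ahishers"))
# expected occurrences: {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}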
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _a ( unittest.TestCase ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=False , ): __A : Tuple = size if size is not None else {"height": 20, "width": 20} __A : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18} __A : int = parent __A : List[Any] = batch_size __A : Tuple = num_channels __A : Any = image_size __A : Optional[int] = min_resolution __A : Any = max_resolution __A : str = do_resize __A : Tuple = size __A : Tuple = do_center_crop __A : Union[str, Any] = crop_size __A : Tuple = do_normalize __A : Union[str, Any] = image_mean __A : Dict = image_std __A : Optional[Any] = do_reduce_labels def __UpperCAmelCase( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase_ ( ) -> str: __A : List[str] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __A : Optional[Any] = Image.open(dataset[0]["file"] ) __A : Union[str, Any] = Image.open(dataset[1]["file"] ) return image, map def lowerCamelCase_ ( ) -> Dict: __A : str = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __A : List[Any] = Image.open(ds[0]["file"] ) __A : Union[str, Any] = Image.open(ds[1]["file"] ) __A : Optional[Any] = Image.open(ds[2]["file"] ) __A : str = Image.open(ds[3]["file"] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _a ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Tuple = BeitImageProcessor if is_vision_available() else None def __UpperCAmelCase( self ): __A : Tuple = BeitImageProcessingTester(self ) @property def __UpperCAmelCase( self ): return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase( self ): __A : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "center_crop" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) ) def __UpperCAmelCase( self ): __A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 20, "width": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase ) __A : str = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__UpperCAmelCase ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) self.assertEqual(image_processor.do_reduce_labels , __UpperCAmelCase ) def __UpperCAmelCase( self ): pass def __UpperCAmelCase( self ): # Initialize image_processing __A : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input __A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCAmelCase( self ): # Initialize image_processing __A : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input __A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCAmelCase( self ): # Initialize image_processing __A : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input __A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __A : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __UpperCAmelCase( self ): # Initialize image_processing __A : Tuple = self.image_processing_class(**self.image_processor_dict ) # create 
random PyTorch tensors __A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) __A : Tuple = [] for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input __A : str = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test batched __A : Any = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test not batched input (PIL images) __A , __A : Optional[Any] = prepare_semantic_single_inputs() __A : Dict = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test batched input (PIL images) __A , __A : List[Any] = prepare_semantic_batch_inputs() __A : Tuple = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) def __UpperCAmelCase( self ): # Initialize image_processing __A : Tuple = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 __A , __A : List[Any] = prepare_semantic_single_inputs() __A : Optional[int] = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 150 ) __A : 
Optional[Any] = True __A : int = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 )
387
0
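The do_reduce_labels behaviour exercised at the end of the test above can be summarised in a few lines; this is a sketch of the usual ADE20k convention (0 = background becomes the 255 ignore index and all other ids shift down by one), not the processor's exact code path:

import numpy as np

def reduce_label(label: np.ndarray) -> np.ndarray:
    label = label.copy()
    label[label == 0] = 255    # background -> ignore index
    label = label - 1          # shift remaining classes down to 0..149
    label[label == 254] = 255  # keep the ignore index pinned at 255
    return label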
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.txt'} __magic_name__ = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __magic_name__ = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __magic_name__ = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_INIT_CONFIGURATION a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ConvBertTokenizer def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,): '''simple docstring''' super().__init__( _a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,) A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_a ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars ): A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) ) A_ : str = do_lower_case A_ : Any = strip_accents A_ : int = tokenize_chinese_chars A_ : Tuple = normalizer_class(**_a ) A_ : Any = do_lower_case def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ): '''simple docstring''' A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : int = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a )
665
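The two helpers at the bottom of the tokenizer above implement the standard BERT-style special-token layout; note that the obfuscation in this row has collapsed the original token_ids_0/token_ids_1 parameter names into one. A self-contained sketch of the intended behaviour:

# Single sequence:   [CLS] A [SEP]        -> token_type_ids all 0
# Pair of sequences: [CLS] A [SEP] B [SEP] -> 0s for "[CLS] A [SEP]", 1s for "B [SEP]"
def create_token_type_ids(token_ids_0: list, token_ids_1: list = None) -> list:
    if token_ids_1 is None:
        return [0] * (len(token_ids_0) + 2)
    return [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

assert create_token_type_ids([7, 8]) == [0, 0, 0, 0]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]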
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ): '''simple docstring''' A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a ) return generator, ["Something to write", "Something else"] def _a ( self : str ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : Any = generator("""Something there""" ) self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) A_ : List[str] = generator( ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) with self.assertRaises(_a ): generator(4 ) @require_torch def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" ) # do_sample=False necessary for reproducibility A_ : Tuple = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] ) A_ : Optional[int] = 3 A_ : Tuple = generator( """Something there""" ,num_return_sequences=_a ,num_beams=_a ,) A_ : Optional[Any] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a ,_a ) A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a ) self.assertEqual( _a ,[ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] ,) A_ : Dict = generator.model.config.eos_token_id A_ : Optional[int] = """<pad>""" A_ : List[Any] = generator( ["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,) self.assertEqual( _a ,[ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] ,) @require_tf def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" ) # do_sample=False necessary for reproducibility A_ : Dict = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] )
665
1
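A minimal usage sketch for the pipeline under test above; the tiny random checkpoint produces meaningless text and is only useful as a smoke test:

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # [{'generated_text': ...}]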
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
125
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> str: # in NER datasets, the last column is usually reserved for NER label lowerCAmelCase__ = label_idx def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = mode.value lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' ) lowerCAmelCase__ = 1 lowerCAmelCase__ = [] with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f: lowerCAmelCase__ = [] lowerCAmelCase__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) guid_index += 1 lowerCAmelCase__ = [] lowerCAmelCase__ = [] else: lowerCAmelCase__ = line.split(" " ) words.append(splits[0] ) if len(SCREAMING_SNAKE_CASE__ ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) return examples def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> Dict: lowerCAmelCase__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(SCREAMING_SNAKE_CASE__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(SCREAMING_SNAKE_CASE__ ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." 
, line.split()[0] ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: lowerCAmelCase__ = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Dict ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: lowerCAmelCase__ = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = mode.value lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' ) lowerCAmelCase__ = 1 lowerCAmelCase__ = [] with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f: for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = [] lowerCAmelCase__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) guid_index += 1 return examples def a ( self : int , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> int: lowerCAmelCase__ = 0 for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = preds_list[example_id] lowerCAmelCase__ = "" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(SCREAMING_SNAKE_CASE__ ) example_id += 1 def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
125
1
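A minimal sketch instantiating the LUKE configuration class above through the public transformers API; the two entity arguments are the LUKE-specific knobs on top of the usual BERT-style hyperparameters:

from transformers import LukeConfig

config = LukeConfig(entity_vocab_size=500_000, entity_emb_size=256)
print(config.use_entity_aware_attention)  # True by default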
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() A = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model A = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names A = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: A = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: A = "allenai" def __UpperCAmelCase ( __A ) -> Optional[int]: '''simple docstring''' UpperCAmelCase__ = dict((re.sub(R"@@$" , "" , __A ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , __A ), v) for k, v in d.items() ) UpperCAmelCase__ = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] UpperCAmelCase__ = d[k] # restore return da def __UpperCAmelCase ( __A , __A ) -> Any: '''simple docstring''' assert os.path.exists(__A ) os.makedirs(__A , exist_ok=__A ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models UpperCAmelCase__ = basename(__A ) UpperCAmelCase__ = dirname(__A ) UpperCAmelCase__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel UpperCAmelCase__ = cls.hub_models() UpperCAmelCase__ = {"bpe": "fastbpe", "tokenizer": "moses"} UpperCAmelCase__ = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F"""using checkpoint {checkpoint_file}""" ) UpperCAmelCase__ = hub_utils.from_pretrained( __A , __A , __A , archive_map=__A , **__A ) UpperCAmelCase__ = vars(chkpt["args"]["model"] ) UpperCAmelCase__ = args["source_lang"] UpperCAmelCase__ = args["target_lang"] UpperCAmelCase__ = dirname(__A ) UpperCAmelCase__ = basename(__A ) # dicts UpperCAmelCase__ = os.path.join(__A , F"""dict.{src_lang}.txt""" ) UpperCAmelCase__ = os.path.join(__A , F"""dict.{tgt_lang}.txt""" ) UpperCAmelCase__ = Dictionary.load(__A ) UpperCAmelCase__ = rewrite_dict_keys(src_dict.indices ) UpperCAmelCase__ = len(__A ) UpperCAmelCase__ = os.path.join(__A , "vocab-src.json" ) print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab UpperCAmelCase__ = True for k in src_vocab.keys(): if not k.islower(): UpperCAmelCase__ = False break UpperCAmelCase__ = Dictionary.load(__A ) UpperCAmelCase__ = rewrite_dict_keys(tgt_dict.indices ) UpperCAmelCase__ = len(__A ) UpperCAmelCase__ = os.path.join(__A , "vocab-tgt.json" ) print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) ) # merges_file (bpecodes) UpperCAmelCase__ = os.path.join(__A , VOCAB_FILES_NAMES["merges_file"] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" UpperCAmelCase__ = os.path.join(__A , __A ) if os.path.exists(__A ): break with open(__A , encoding="utf-8" ) as fin: UpperCAmelCase__ = fin.read() UpperCAmelCase__ = re.sub(R" \d+$" , "" , __A , 0 , re.M ) # remove frequency number print(F"""Generating {merges_file}""" ) with open(__A , "w" , encoding="utf-8" ) as fout: fout.write(__A ) # model config UpperCAmelCase__ = os.path.join(__A , "config.json" ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}""" assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}""" UpperCAmelCase__ = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": tgt_vocab_size, "langs": [src_lang, tgt_lang], "encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], "decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": 
args["share_all_embeddings"], } # good hparam defaults to start with UpperCAmelCase__ = 5 UpperCAmelCase__ = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: UpperCAmelCase__ = best_score_hparams[model_dir]["length_penalty"] else: UpperCAmelCase__ = 1.0 print(F"""Generating {fsmt_model_config_file}""" ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) ) # tokenizer config UpperCAmelCase__ = os.path.join(__A , __A ) UpperCAmelCase__ = { "langs": [src_lang, tgt_lang], "model_max_length": 1_0_2_4, "do_lower_case": do_lower_case, } print(F"""Generating {fsmt_tokenizer_config_file}""" ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) ) # model UpperCAmelCase__ = chkpt["models"][0] UpperCAmelCase__ = model.state_dict() # rename keys to start with 'model.' UpperCAmelCase__ = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys UpperCAmelCase__ = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(__A , __A ) UpperCAmelCase__ = FSMTConfig.from_pretrained(__A ) UpperCAmelCase__ = FSMTForConditionalGeneration(__A ) # check that it loads ok model_new.load_state_dict(__A , strict=__A ) # save UpperCAmelCase__ = os.path.join(__A , __A ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(__A , __A ) print("Conversion is done!" ) print("\nLast step is to upload the files to s3" ) print(F"""cd {data_root}""" ) print(F"""transformers-cli upload {model_dir}""" ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) A = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
475
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class lowercase__ ( unittest.TestCase ): def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = SamImageProcessor() UpperCAmelCase__ = SamProcessor(_lowercase ) processor.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self : Optional[int] , **_lowercase : str ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self : Dict ): """simple docstring""" UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 ) UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(_lowercase , return_tensors="np" ) UpperCAmelCase__ = processor(images=_lowercase , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = [torch.ones((1, 3, 5, 5) )] UpperCAmelCase__ = [[17_64, 26_46]] UpperCAmelCase__ = [[6_83, 10_24]] UpperCAmelCase__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) UpperCAmelCase__ = processor.post_process_masks( _lowercase , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )] UpperCAmelCase__ = processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) UpperCAmelCase__ = [[1, 0], [0, 1]] with self.assertRaises(_lowercase ): UpperCAmelCase__ = 
processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) ) @require_vision @require_tf class lowercase__ ( unittest.TestCase ): def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = SamImageProcessor() UpperCAmelCase__ = SamProcessor(_lowercase ) processor.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self : Union[str, Any] , **_lowercase : int ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor def _UpperCAmelCase ( self : Dict ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 ) UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(_lowercase , return_tensors="np" ) UpperCAmelCase__ = processor(images=_lowercase , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = [tf.ones((1, 3, 5, 5) )] UpperCAmelCase__ = [[17_64, 26_46]] UpperCAmelCase__ = [[6_83, 10_24]] UpperCAmelCase__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) UpperCAmelCase__ = processor.post_process_masks( _lowercase , tf.convert_to_tensor(_lowercase ) , tf.convert_to_tensor(_lowercase ) , return_tensors="tf" , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )] UpperCAmelCase__ = processor.post_process_masks( _lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) UpperCAmelCase__ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): UpperCAmelCase__ = processor.post_process_masks( _lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="tf" ) @require_vision @require_torchvision class lowercase__ ( unittest.TestCase ): def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = 
SamImageProcessor() UpperCAmelCase__ = SamProcessor(_lowercase ) processor.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self : str , **_lowercase : Optional[int] ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor def _UpperCAmelCase ( self : str ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self : str ): """simple docstring""" UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) UpperCAmelCase__ = [tf.convert_to_tensor(_lowercase )] UpperCAmelCase__ = [torch.tensor(_lowercase )] UpperCAmelCase__ = [[17_64, 26_46]] UpperCAmelCase__ = [[6_83, 10_24]] UpperCAmelCase__ = processor.post_process_masks( _lowercase , _lowercase , _lowercase , return_tensors="tf" ) UpperCAmelCase__ = processor.post_process_masks( _lowercase , _lowercase , _lowercase , return_tensors="pt" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=_lowercase ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(_lowercase , return_tensors="pt" )["pixel_values"].numpy() UpperCAmelCase__ = processor(images=_lowercase , return_tensors="pt" )["pixel_values"].numpy() UpperCAmelCase__ = image_processor(_lowercase , return_tensors="tf" )["pixel_values"].numpy() UpperCAmelCase__ = processor(images=_lowercase , return_tensors="tf" )["pixel_values"].numpy() self.assertTrue(np.allclose(_lowercase , _lowercase ) ) self.assertTrue(np.allclose(_lowercase , _lowercase ) ) self.assertTrue(np.allclose(_lowercase , _lowercase ) )
475
1
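A standalone restatement of the vocabulary-key rewrite performed by the FSMT conversion script above; the sample dictionary is illustrative. Fairseq marks word-internal BPE pieces with a trailing "@@", while the HF FSMT vocab marks word-final pieces with "</w>", and the special tokens keep their plain names:

import re

def rewrite_dict_keys(d):
    d2 = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
    for k in "<s> <pad> </s> <unk>".split():
        del d2[f"{k}</w>"]  # drop the wrongly suffixed entry
        d2[k] = d[k]        # and restore the special token as-is
    return d2

print(rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "new@@": 4, "york": 5}))
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'new': 4, 'york</w>': 5}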
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset) def snake_case ( A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = "first_exhausted" ,): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(A__ ): if not isinstance(A__ ,(Dataset, IterableDataset) ): if isinstance(A__ ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A__ ).__name__}.""" ) if i == 0: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = ( (Dataset, IterableDataset) if isinstance(A__ ,A__ ) else (IterableDataset, Dataset) ) elif not isinstance(A__ ,A__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( A__ ,A__ ,A__ ,info=A__ ,split=A__ ,stopping_strategy=A__ ) else: return _interleave_iterable_datasets( A__ ,A__ ,A__ ,info=A__ ,split=A__ ,stopping_strategy=A__ ) def snake_case ( A__ ,A__ = None ,A__ = None ,A__ = 0 ,): if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(A__ ): if not isinstance(A__ ,(Dataset, IterableDataset) ): if isinstance(A__ ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A__ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A__ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A__ ).__name__}.""" ) if i == 0: UpperCAmelCase_ , UpperCAmelCase_ : str = ( (Dataset, IterableDataset) if isinstance(A__ ,A__ ) else (IterableDataset, Dataset) ) elif not isinstance(A__ ,A__ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A__ ,info=A__ ,split=A__ ,axis=A__ ) else: return _concatenate_iterable_datasets(A__ ,info=A__ ,split=A__ ,axis=A__ )
463
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class UpperCamelCase_ (__A ): def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Tuple ) -> Dict: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = config_class UpperCAmelCase_ : List[str] = has_text_modality UpperCAmelCase_ : Tuple = kwargs UpperCAmelCase_ : int = common_properties def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.config_class(**self.inputs_dict ) UpperCAmelCase_ : int = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) , msg=f"""`{prop}` does not exist""" ) # Test that config has the common properties as setter for idx, name in enumerate(lowerCAmelCase_ ): try: setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) self.parent.assertEqual( getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , msg=f"""`{name} value {idx} expected, but was {getattr(lowerCAmelCase_ , lowerCAmelCase_ )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(lowerCAmelCase_ ): try: UpperCAmelCase_ : Optional[Any] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , msg=f"""`{name} value {idx} expected, but was {getattr(lowerCAmelCase_ , lowerCAmelCase_ )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: UpperCAmelCase_ : str = self.config_class(**self.inputs_dict ) UpperCAmelCase_ : List[str] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Optional[Any] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : List[str] = os.path.join(lowerCAmelCase_ , "config.json" ) config_first.to_json_file(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.config_class.from_json_file(lowerCAmelCase_ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self.config_class.from_pretrained(lowerCAmelCase_ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict ) UpperCAmelCase_ : int = "test" with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Optional[int] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) config_first.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.config_class.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: UpperCAmelCase_ : List[str] = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) UpperCAmelCase_ : List[Any] = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: if self.config_class.is_composition: return UpperCAmelCase_ : str = self.config_class() self.parent.assertIsNotNone(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = copy.deepcopy(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.config_class(**lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) ) elif getattr(lowerCAmelCase_ , lowerCAmelCase_ ) != value: wrong_values.append((key, getattr(lowerCAmelCase_ , lowerCAmelCase_ ), value) ) if len(lowerCAmelCase_ ) > 0: UpperCAmelCase_ : Any = "\n".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] ) raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" ) def _SCREAMING_SNAKE_CASE ( self : 
List[Any] ) -> str: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
463
1
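A small sketch of the public interleave_datasets entry point whose validation logic appears above; the toy datasets are illustrative. With "first_exhausted" the mix stops as soon as the shortest source runs out:

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12, 13]})
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
print(mixed["x"])  # alternates rows from d1 and d2 until d1 is exhausted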
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def A_ ( snake_case__ ) -> Optional[Any]: _UpperCamelCase :Optional[int] = {} _UpperCamelCase :str = job["""started_at"""] _UpperCamelCase :str = job["""completed_at"""] _UpperCamelCase :str = date_parser.parse(snake_case_ ) _UpperCamelCase :int = date_parser.parse(snake_case_ ) _UpperCamelCase :Optional[Any] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _UpperCamelCase :Optional[int] = start _UpperCamelCase :List[str] = end _UpperCamelCase :str = duration_in_min return job_info def A_ ( snake_case__ , snake_case__=None ) -> Tuple: _UpperCamelCase :Optional[int] = None if token is not None: _UpperCamelCase :Tuple = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"} _UpperCamelCase :Union[str, Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" _UpperCamelCase :Optional[int] = requests.get(snake_case_ , headers=snake_case_ ).json() _UpperCamelCase :Optional[int] = {} try: job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} ) _UpperCamelCase :int = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(snake_case_ ): _UpperCamelCase :Tuple = requests.get(url + f"&page={i + 2}" , headers=snake_case_ ).json() job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} ) return job_time except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} if __name__ == "__main__": UpperCamelCase__ :str = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") UpperCamelCase__ :List[Any] = parser.parse_args() UpperCamelCase__ :Union[str, Any] = get_job_time(args.workflow_run_id) UpperCamelCase__ :Union[str, Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v["duration"]}""")
355
"""simple docstring""" import math def A_ ( snake_case_ : list ,snake_case_ : int = 0 ,snake_case_ : int = 0 ): '''simple docstring''' UpperCamelCase : Optional[Any] = end or len(snake_case_ ) for i in range(snake_case_ ,snake_case_ ): UpperCamelCase : List[str] = i UpperCamelCase : List[str] = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: UpperCamelCase : Dict = array[temp_index - 1] temp_index -= 1 UpperCamelCase : Dict = temp_index_value return array def A_ ( snake_case_ : list ,snake_case_ : int ,snake_case_ : int ): # Max Heap '''simple docstring''' UpperCamelCase : Optional[Any] = index UpperCamelCase : Union[str, Any] = 2 * index + 1 # Left Node UpperCamelCase : Tuple = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: UpperCamelCase : Union[str, Any] = left_index if right_index < heap_size and array[largest] < array[right_index]: UpperCamelCase : List[str] = right_index if largest != index: UpperCamelCase , UpperCamelCase : Any = array[largest], array[index] heapify(snake_case_ ,snake_case_ ,snake_case_ ) def A_ ( snake_case_ : list ): '''simple docstring''' UpperCamelCase : Any = len(snake_case_ ) for i in range(n // 2 ,-1 ,-1 ): heapify(snake_case_ ,snake_case_ ,snake_case_ ) for i in range(n - 1 ,0 ,-1 ): UpperCamelCase , UpperCamelCase : str = array[0], array[i] heapify(snake_case_ ,0 ,snake_case_ ) return array def A_ ( snake_case_ : list ,snake_case_ : int ,snake_case_ : int ,snake_case_ : int ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def A_ ( snake_case_ : list ,snake_case_ : int ,snake_case_ : int ,snake_case_ : int ): '''simple docstring''' UpperCamelCase : Any = low UpperCamelCase : Optional[Any] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i UpperCamelCase , UpperCamelCase : Union[str, Any] = array[j], array[i] i += 1 def A_ ( snake_case_ : list ): '''simple docstring''' if len(snake_case_ ) == 0: return array UpperCamelCase : Union[str, Any] = 2 * math.ceil(math.loga(len(snake_case_ ) ) ) UpperCamelCase : int = 1_6 return intro_sort(snake_case_ ,0 ,len(snake_case_ ) ,snake_case_ ,snake_case_ ) def A_ ( snake_case_ : list ,snake_case_ : int ,snake_case_ : int ,snake_case_ : int ,snake_case_ : int ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(snake_case_ ) max_depth -= 1 UpperCamelCase : Dict = median_of_a(snake_case_ ,snake_case_ ,start + ((end - start) // 2) + 1 ,end - 1 ) UpperCamelCase : str = partition(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) intro_sort(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) UpperCamelCase : List[str] = p return insertion_sort(snake_case_ ,snake_case_ ,snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() __A : Optional[int] = input('''Enter numbers separated by a comma : ''').strip() __A : List[Any] = [float(item) for item in user_input.split(''',''')] print(sort(unsorted))
499
0
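The per-job duration arithmetic from the job-timing script above, shown standalone on fixed, illustrative timestamps; GitHub returns ISO-8601 strings, and the result is rounded to whole minutes:

import dateutil.parser as date_parser

start = date_parser.parse("2023-01-01T10:00:00Z")
end = date_parser.parse("2023-01-01T10:07:00Z")
print(round((end - start).total_seconds() / 60.0))  # 7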
'''simple docstring'''


class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a flat array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
710
'''simple docstring'''

from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
680
0
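The ring-buffer index arithmetic behind the fixed-size queue above, shown standalone: both ends advance modulo the capacity, so the array is reused without ever shifting elements.

n = 4
array, front, rear, size = [None] * n, 0, 0, 0
for item in ("a", "b", "c"):            # enqueue three items
    array[rear] = item
    rear = (rear + 1) % n
    size += 1
head, array[front] = array[front], None  # dequeue one
front = (front + 1) % n
size -= 1
print(head, array, front, rear, size)    # a [None, 'b', 'c', None] 1 3 2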
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict: '''simple docstring''' UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size if size is not None else {'height': 18, 'width': 20} UpperCamelCase = do_thumbnail UpperCamelCase = do_align_axis UpperCamelCase = do_pad UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase): lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self )-> str: '''simple docstring''' UpperCamelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase_ ( self )-> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , 'do_resize' ) ) self.assertTrue(hasattr(A_ , 'size' ) ) self.assertTrue(hasattr(A_ , 'do_thumbnail' ) ) self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) ) self.assertTrue(hasattr(A_ , 'do_pad' ) ) self.assertTrue(hasattr(A_ , 'do_normalize' ) ) self.assertTrue(hasattr(A_ , 'image_mean' ) ) self.assertTrue(hasattr(A_ , 'image_std' ) ) def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' pass @is_flaky() def UpperCAmelCase_ ( self )-> Any: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
3
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ : Union[str, Any] ={"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Union[str, Any] =["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str =["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Any =[ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Tuple =[ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys A_ : Dict =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
650
0
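A short sketch of the legacy-size conversion asserted in the Donut test above; it assumes the processor's constructor performs the same (width, height) reordering that the test checks through from_dict:

from transformers import DonutImageProcessor

proc = DonutImageProcessor(size=(42, 84))
print(proc.size)  # {'height': 84, 'width': 42}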
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = 16 ) -> Optional[Any]: lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ : Dict = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ : Any = datasets.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase__ : str = 8 else: lowerCAmelCase__ : Union[str, Any] = None return tokenizer.pad( UpperCamelCase , padding='''longest''' , max_length=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCAmelCase__ : int = DataLoader( tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) lowerCAmelCase__ : Tuple = DataLoader( tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase_ = mocked_dataloaders # noqa: F811 def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCamelCase ) == "1": lowerCAmelCase__ : Union[str, Any] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCAmelCase__ : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: lowerCAmelCase__ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : Optional[Any] = config['''lr'''] lowerCAmelCase__ : List[Any] = int(config['''num_epochs'''] ) lowerCAmelCase__ : Optional[Any] = int(config['''seed'''] ) lowerCAmelCase__ : Dict = int(config['''batch_size'''] ) set_seed(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation lowerCAmelCase__ : Union[str, Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase__ : int = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase__ : Dict = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ : Dict = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=UpperCamelCase ) # Instantiate scheduler lowerCAmelCase__ : List[Any] = get_linear_schedule_with_warmup( optimizer=UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: lowerCAmelCase__ : str = os.path.split(UpperCamelCase )[-1].split('''.''' )[0] accelerator.init_trackers(UpperCamelCase , UpperCamelCase ) # Now we train the model for epoch in range(UpperCamelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCAmelCase__ : str = 0 for step, batch in enumerate(UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase__ : List[str] = model(**UpperCamelCase ) lowerCAmelCase__ : List[Any] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCAmelCase__ : List[str] = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=UpperCamelCase , references=UpperCamelCase , ) lowerCAmelCase__ : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(UpperCamelCase ), '''epoch''': epoch, } , step=UpperCamelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def __lowerCAmelCase ( ) -> Any: lowerCAmelCase__ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=UpperCamelCase , default=UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=UpperCamelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) lowerCAmelCase__ : int = parser.parse_args() lowerCAmelCase__ : Dict = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": main()
470
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
470
1
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "geglu" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "layer_norm" , __SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: """simple docstring""" super().__init__() UpperCamelCase__ : str = only_cross_attention UpperCamelCase__ : Tuple = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' UpperCamelCase__ : Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: UpperCamelCase__ : Union[str, Any] = AdaLayerNorm(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif self.use_ada_layer_norm_zero: UpperCamelCase__ : int = AdaLayerNormZero(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: UpperCamelCase__ : Optional[int] = nn.LayerNorm(__SCREAMING_SNAKE_CASE , elementwise_affine=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = Attention( query_dim=__SCREAMING_SNAKE_CASE , heads=__SCREAMING_SNAKE_CASE , dim_head=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__SCREAMING_SNAKE_CASE , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. UpperCamelCase__ : Any = ( AdaLayerNorm(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else nn.LayerNorm(__SCREAMING_SNAKE_CASE , elementwise_affine=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase__ : List[Any] = Attention( query_dim=__SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__SCREAMING_SNAKE_CASE , dim_head=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , upcast_attention=__SCREAMING_SNAKE_CASE , ) # is self-attn if encoder_hidden_states is none else: UpperCamelCase__ : List[str] = None UpperCamelCase__ : str = None # 3. 
Feed-forward UpperCamelCase__ : Union[str, Any] = nn.LayerNorm(__SCREAMING_SNAKE_CASE , elementwise_affine=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = FeedForward(__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , final_dropout=__SCREAMING_SNAKE_CASE ) # let chunk size default to None UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Dict = 0 def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase__ : Union[str, Any] = chunk_size UpperCamelCase__ : Optional[int] = dim def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> int: """simple docstring""" if self.use_ada_layer_norm: UpperCamelCase__ : List[str] = self.norma(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif self.use_ada_layer_norm_zero: UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.norma( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hidden_dtype=hidden_states.dtype ) else: UpperCamelCase__ : Optional[int] = self.norma(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} UpperCamelCase__ : Union[str, Any] = self.attna( __SCREAMING_SNAKE_CASE , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if self.use_ada_layer_norm_zero: UpperCamelCase__ : int = gate_msa.unsqueeze(1 ) * attn_output UpperCamelCase__ : Optional[int] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: UpperCamelCase__ : Any = ( self.norma(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase__ : str = self.attna( __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase__ : Dict = attn_output + hidden_states # 3. Feed-forward UpperCamelCase__ : List[str] = self.norma(__SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm_zero: UpperCamelCase__ : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) UpperCamelCase__ : Optional[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size UpperCamelCase__ : Tuple = torch.cat( [self.ff(__SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(__SCREAMING_SNAKE_CASE , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: UpperCamelCase__ : Tuple = self.ff(__SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm_zero: UpperCamelCase__ : List[str] = gate_mlp.unsqueeze(1 ) * ff_output UpperCamelCase__ : str = ff_output + hidden_states return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 4 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = "geglu" , __SCREAMING_SNAKE_CASE = False , ) -> Dict: """simple docstring""" super().__init__() UpperCamelCase__ : int = int(dim * mult ) UpperCamelCase__ : int = dim_out if dim_out is not None else dim if activation_fn == "gelu": UpperCamelCase__ : Tuple = GELU(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if activation_fn == "gelu-approximate": UpperCamelCase__ : Optional[Any] = GELU(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , approximate='''tanh''' ) elif activation_fn == "geglu": UpperCamelCase__ : Any = GEGLU(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif activation_fn == "geglu-approximate": UpperCamelCase__ : List[str] = ApproximateGELU(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = nn.ModuleList([] ) # project in self.net.append(__SCREAMING_SNAKE_CASE ) # project dropout self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) ) # project out self.net.append(nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" for module in self.net: UpperCamelCase__ : str = module(__SCREAMING_SNAKE_CASE ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "none" ) -> Union[str, Any]: """simple docstring""" super().__init__() UpperCamelCase__ : Any = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = approximate def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if gate.device.type != "mps": return F.gelu(__SCREAMING_SNAKE_CASE , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase__ : Optional[int] = self.proj(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = self.gelu(__SCREAMING_SNAKE_CASE ) return hidden_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__() UpperCamelCase__ : Dict = nn.Linear(__SCREAMING_SNAKE_CASE , dim_out * 2 ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" if gate.device.type != "mps": return F.gelu(__SCREAMING_SNAKE_CASE ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ : int = self.proj(__SCREAMING_SNAKE_CASE ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__SCREAMING_SNAKE_CASE ) class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__() UpperCamelCase__ : List[str] = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ : Any = self.proj(__SCREAMING_SNAKE_CASE ) return x * torch.sigmoid(1.702 * x ) class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" super().__init__() UpperCamelCase__ : Tuple = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = nn.SiLU() UpperCamelCase__ : str = nn.Linear(__SCREAMING_SNAKE_CASE , embedding_dim * 2 ) UpperCamelCase__ : Optional[Any] = nn.LayerNorm(__SCREAMING_SNAKE_CASE , elementwise_affine=__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE ) ) ) UpperCamelCase__ ,UpperCamelCase__ : Dict = torch.chunk(__SCREAMING_SNAKE_CASE , 2 ) UpperCamelCase__ : Optional[Any] = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale) + shift return x class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple 
docstring""" super().__init__() UpperCamelCase__ : Optional[int] = CombinedTimestepLabelEmbeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = nn.SiLU() UpperCamelCase__ : Optional[Any] = nn.Linear(__SCREAMING_SNAKE_CASE , 6 * embedding_dim , bias=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = nn.LayerNorm(__SCREAMING_SNAKE_CASE , elementwise_affine=__SCREAMING_SNAKE_CASE , eps=1e-6 ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Dict: """simple docstring""" UpperCamelCase__ : Tuple = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hidden_dtype=__SCREAMING_SNAKE_CASE ) ) ) UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Tuple = emb.chunk(6 , dim=1 ) UpperCamelCase__ : Tuple = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1e-5 ) -> Any: """simple docstring""" super().__init__() UpperCamelCase__ : Tuple = num_groups UpperCamelCase__ : int = eps if act_fn is None: UpperCamelCase__ : Any = None else: UpperCamelCase__ : int = get_activation(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = nn.Linear(__SCREAMING_SNAKE_CASE , out_dim * 2 ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if self.act: UpperCamelCase__ : List[str] = self.act(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = self.linear(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = emb[:, :, None, None] UpperCamelCase__ ,UpperCamelCase__ : str = emb.chunk(2 , dim=1 ) UpperCamelCase__ : Optional[Any] = F.group_norm(__SCREAMING_SNAKE_CASE , self.num_groups , eps=self.eps ) UpperCamelCase__ : int = x * (1 + scale) + shift return x
285
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) -> List[str]: """simple docstring""" UpperCamelCase__ : Any = parent UpperCamelCase__ : str = batch_size UpperCamelCase__ : List[Any] = seq_length UpperCamelCase__ : List[Any] = is_training UpperCamelCase__ : Any = use_input_mask UpperCamelCase__ : Dict = use_token_type_ids UpperCamelCase__ : List[str] = use_labels UpperCamelCase__ : Optional[int] = vocab_size UpperCamelCase__ : Union[str, Any] = hidden_size UpperCamelCase__ : int = num_hidden_layers UpperCamelCase__ : Union[str, Any] = num_attention_heads UpperCamelCase__ : Union[str, Any] = intermediate_size UpperCamelCase__ : Optional[int] = hidden_act UpperCamelCase__ : Optional[Any] = hidden_dropout_prob UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCamelCase__ : Union[str, Any] = max_position_embeddings UpperCamelCase__ : Optional[Any] = type_vocab_size UpperCamelCase__ : int = type_sequence_label_size UpperCamelCase__ : Tuple = initializer_range UpperCamelCase__ : Optional[Any] = num_labels UpperCamelCase__ : List[str] = num_choices UpperCamelCase__ : Union[str, Any] = scope def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ : Tuple = None if self.use_input_mask: UpperCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ : int = None if self.use_token_type_ids: UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ : Union[str, Any] = None UpperCamelCase__ : int = None UpperCamelCase__ : Tuple = None if self.use_labels: UpperCamelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: """simple docstring""" return 
BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : List[Any] = BioGptModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" UpperCamelCase__ : Dict = BioGptForCausalLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase__ : str = BioGptModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() # create attention mask UpperCamelCase__ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = self.seq_length // 2 UpperCamelCase__ : List[str] = 0 # first forward pass UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ : Tuple = ids_tensor((1,) , __SCREAMING_SNAKE_CASE ).item() + 1 UpperCamelCase__ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ : int = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ : List[Any] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )] , dim=1 , ) # get two different outputs UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state'''] UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , 
attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state'''] # select random slice UpperCamelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : List[str] = BioGptModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval() UpperCamelCase__ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) # first forward pass UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ ,UpperCamelCase__ : List[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state'''] UpperCamelCase__ : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[ '''last_hidden_state''' ] # select random slice UpperCamelCase__ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" UpperCamelCase__ : Optional[Any] = BioGptForCausalLM(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ : Tuple = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase__ : str = BioGptModel(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) 
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" UpperCamelCase__ : Union[str, Any] = self.num_labels UpperCamelCase__ : Any = BioGptForTokenClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase__ : List[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : int = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) ,( UpperCamelCase__ ) ,( UpperCamelCase__ ) ,( UpperCamelCase__ ) ,( UpperCamelCase__ ) ,( UpperCamelCase__ ) ,( UpperCamelCase__ ) , ) : List[str] = config_and_inputs UpperCamelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = (BioGptForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ = ( { '''feature-extraction''': BioGptModel, '''text-classification''': BioGptForSequenceClassification, '''text-generation''': BioGptForCausalLM, '''token-classification''': BioGptForTokenClassification, '''zero-shot''': BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False def __SCREAMING_SNAKE_CASE ( self ) -> str: """simple docstring""" UpperCamelCase__ : Dict = BioGptModelTester(self ) UpperCamelCase__ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ : Optional[Any] = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: """simple docstring""" UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__SCREAMING_SNAKE_CASE , gradient_checkpointing=__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self 
) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: """simple docstring""" UpperCamelCase__ : Dict = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCamelCase__ : Optional[Any] = '''left''' # Define PAD Token = EOS Token = 50256 UpperCamelCase__ : Optional[int] = tokenizer.eos_token UpperCamelCase__ : List[str] = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ : Optional[int] = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCamelCase__ : Dict = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = inputs['''input_ids'''].to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = model.generate( input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''].to(__SCREAMING_SNAKE_CASE ) , ) UpperCamelCase__ : int = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = model.generate(input_ids=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCamelCase__ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Dict = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Optional[int] = BioGptModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ : int = 3 UpperCamelCase__ : Tuple = input_dict['''input_ids'''] UpperCamelCase__ : List[Any] = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ : Optional[int] = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase__ : int = model(__SCREAMING_SNAKE_CASE , 
attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ : List[str] = 3 UpperCamelCase__ : List[Any] = '''multi_label_classification''' UpperCamelCase__ : List[str] = input_dict['''input_ids'''] UpperCamelCase__ : Tuple = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ : List[str] = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase__ : List[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCamelCase__ : Union[str, Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) UpperCamelCase__ : Tuple = model(__SCREAMING_SNAKE_CASE )[0] UpperCamelCase__ : Optional[Any] = 4_2_3_8_4 UpperCamelCase__ : List[Any] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase__ : List[Any] = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: """simple docstring""" UpperCamelCase__ : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCamelCase__ : List[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) UpperCamelCase__ : Dict = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[int] = model.generate( **__SCREAMING_SNAKE_CASE , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=__SCREAMING_SNAKE_CASE , ) UpperCamelCase__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
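The slow tests above exercise the public checkpoint directly; the same generation path fits in a few lines. The checkpoint name comes from the tests, the prompt is illustrative:

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_length=40, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))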
285
1
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first search over the state space tree: each node has exactly
    # len(sequence) - index children, and a full permutation is printed at a leaf.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
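For reference, the standard library enumerates the same permutations, in the same order, without recursion; a quick cross-check of the backtracking version:

from itertools import permutations

sequence = [3, 1, 2, 4]
# itertools yields tuples in the same order the backtracking tree prints lists.
for perm in permutations(sequence):
    print(list(perm))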
212
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off one factorial digit per position.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
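A quick sanity check of the factorial-number-system routine against brute-force enumeration (this assumes the function name `kth_permutation` restored above):

from itertools import permutations

all_perms = [list(p) for p in permutations(range(4))]
assert all(kth_permutation(k, 4) == all_perms[k] for k in range(len(all_perms)))
print(kth_permutation(5, 4))  # [0, 3, 2, 1] -- the 6th permutation of 0..3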
212
1
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _SCREAMING_SNAKE_CASE : Dict = threading.Lock() _SCREAMING_SNAKE_CASE : Optional[logging.Handler] = None _SCREAMING_SNAKE_CASE : List[Any] = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _SCREAMING_SNAKE_CASE : List[str] = logging.WARNING _SCREAMING_SNAKE_CASE : Optional[int] = True def UpperCAmelCase__ (): """simple docstring""" snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' ,UpperCamelCase_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' F'''has to be one of: { ", ".join(log_levels.keys() ) }''' ) return _default_log_level def UpperCAmelCase__ (): """simple docstring""" return __name__.split('''.''' )[0] def UpperCAmelCase__ (): """simple docstring""" return logging.getLogger(_get_library_name() ) def UpperCAmelCase__ (): """simple docstring""" global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return snake_case = logging.StreamHandler() # Set sys.stderr as stream. snake_case = sys.stderr.flush # Apply our default configuration to the library root logger. snake_case = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) snake_case = False def UpperCAmelCase__ (): """simple docstring""" global _default_handler with _lock: if not _default_handler: return snake_case = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) snake_case = None def UpperCAmelCase__ (): """simple docstring""" return log_levels def UpperCAmelCase__ (UpperCamelCase_ = None ): """simple docstring""" if name is None: snake_case = _get_library_name() _configure_library_root_logger() return logging.getLogger(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" _configure_library_root_logger() _get_library_root_logger().setLevel(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" return set_verbosity(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" return set_verbosity(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" return set_verbosity(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" return set_verbosity(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def UpperCAmelCase__ (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(UpperCamelCase_ ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" _configure_library_root_logger() 
assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" _configure_library_root_logger() snake_case = False def UpperCAmelCase__ (): """simple docstring""" _configure_library_root_logger() snake_case = True def UpperCAmelCase__ (): """simple docstring""" snake_case = _get_library_root_logger().handlers for handler in handlers: snake_case = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' ) handler.setFormatter(UpperCamelCase_ ) def UpperCAmelCase__ (): """simple docstring""" snake_case = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(UpperCamelCase_ ) def UpperCAmelCase__ (self ,*UpperCamelCase_ ,**UpperCamelCase_ ): """simple docstring""" snake_case = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' ,UpperCamelCase_ ) if no_advisory_warnings: return self.warning(*UpperCamelCase_ ,**UpperCamelCase_ ) _SCREAMING_SNAKE_CASE : int = warning_advice @functools.lru_cache(UpperCamelCase_ ) def UpperCAmelCase__ (self ,*UpperCamelCase_ ,**UpperCamelCase_ ): """simple docstring""" self.warning(*UpperCamelCase_ ,**UpperCamelCase_ ) _SCREAMING_SNAKE_CASE : Tuple = warning_once class A__ : """simple docstring""" def __init__( self , *__snake_case , **__snake_case ): # pylint: disable=unused-argument snake_case = args[0] if args else None def __iter__( self ): return iter(self._iterator ) def __getattr__( self , __snake_case ): def empty_fn(*__snake_case , **__snake_case ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): return self def __exit__( self , __snake_case , __snake_case , __snake_case ): return class A__ : """simple docstring""" def __call__( self , *__snake_case , **__snake_case ): if _tqdm_active: return tqdm_lib.tqdm(*__snake_case , **__snake_case ) else: return EmptyTqdm(*__snake_case , **__snake_case ) def a_ ( self , *__snake_case , **__snake_case ): snake_case = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__snake_case , **__snake_case ) def a_ ( self ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() _SCREAMING_SNAKE_CASE : int = _tqdm_cls() def UpperCAmelCase__ (): """simple docstring""" global _tqdm_active return bool(_tqdm_active ) def UpperCAmelCase__ (): """simple docstring""" global _tqdm_active snake_case = True hf_hub_utils.enable_progress_bars() def UpperCAmelCase__ (): """simple docstring""" global _tqdm_active snake_case = False hf_hub_utils.disable_progress_bars()
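In user code, the module above is reached through `transformers`' public logging facade; a minimal sketch of the knobs it exposes:

from transformers.utils import logging

logging.set_verbosity_info()           # or logging.set_verbosity(logging.INFO)
logger = logging.get_logger(__name__)  # a child of the library root logger
logger.info("visible at INFO level")

logging.disable_progress_bar()         # turn tqdm bars off globally
logging.enable_progress_bar()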
550
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given exactly two of resistance, reactance and impedance, solve for the third
    via the right-triangle relation Z^2 = R^2 + X^2 (pass 0 for the unknown)."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
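The three branches trace out the impedance triangle; for the classic 3-4-5 triple (using the `electrical_impedance` name restored above):

print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}
print(electrical_impedance(3, 0, 5))  # {'reactance': 4.0}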
550
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = StableDiffusionXLImgaImgPipeline _snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} _snake_case = PipelineTesterMixin.required_optional_params - {'latents'} _snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS _snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def A__ ( self )-> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE_ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) __UpperCamelCase = EulerDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) __UpperCamelCase = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 )-> Dict: '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = image / 2 + 0.5 if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ): __UpperCamelCase = 
torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.7_5, } return inputs def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> Union[str, Any]: '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def A__ ( self )-> str: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def A__ ( self )-> List[Any]: '''simple docstring''' pass def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) # forward without prompt embeds __UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 3 * ['''this is a negative prompt'''] __UpperCamelCase = negative_prompt __UpperCamelCase = 3 * [inputs['''prompt''']] __UpperCamelCase = sd_pipe(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = output.images[0, -3:, -3:, -1] # forward with prompt embeds __UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 3 * ['''this is a negative prompt'''] __UpperCamelCase = 3 * [inputs.pop('''prompt''' )] ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( **SCREAMING_SNAKE_CASE_ , prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" , SCREAMING_SNAKE_CASE_=torch.floataa , SCREAMING_SNAKE_CASE_=0 )-> Any: '''simple docstring''' __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 4, 64, 64) ) 
__UpperCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.get_inputs(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE_ ).images __UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __UpperCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
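For orientation, the pipeline under test corresponds to `StableDiffusionXLImg2ImgPipeline` in current `diffusers` (the class name is mangled in this dump). A hedged sketch of the end-to-end call; the checkpoint id is one common choice and the image URL is a placeholder:

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/input.png")  # placeholder URL
image = pipe(
    prompt="a photograph of an astronaut riding a horse",
    image=init_image,
    strength=0.75,        # how far to diffuse away from the init image
    guidance_scale=5.0,
).images[0]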
451
import os import re import shutil import sys import tempfile import unittest import black lowercase__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. lowercase__ : Dict = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) __UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: __UpperCamelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result __UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) __UpperCamelCase = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[str]: '''simple docstring''' self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , 
'''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE_ ) , ) # Copy consistency with a really long name __UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , F"{long_class_name}SchedulerOutput" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE_ ) , )
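The utility being tested enforces the `# Copied from` convention; schematically, the marker it scans for sits directly above a class, and the checker re-renders the referenced class (applying any `A->B` rename) and fails if the body below has drifted:

from dataclasses import dataclass
from typing import Optional

import torch
from diffusers.utils import BaseOutput


# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
@dataclass
class TestSchedulerOutput(BaseOutput):
    # These fields must stay byte-identical to DDPMSchedulerOutput's (modulo the rename).
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None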
451
1
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds ~14 correct digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be incorrectly rounded.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
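A quick check of the routine above against the well-known leading digits (note the trailing digit dropped by the `[:-1]`):

assert pi(10) == "3.14159265"
print(pi(50))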
491
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
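Usage follows the standard config-first pattern; in recent `transformers` releases `NezhaModel` is the matching model class:

from transformers import NezhaConfig, NezhaModel

config = NezhaConfig(hidden_size=768, max_relative_position=64)
model = NezhaModel(config)       # randomly initialized from the config
print(model.config.model_type)   # "nezha"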
343
0
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
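A brief usage sketch for the class above (hypothetical driver code, not part of the original module): coefficients are ordered from the constant term upward.

p = Polynomial(2, [1, 2, 3])  # x^0, x^1, x^2 -> 3x^2 + 2x + 1
q = Polynomial(1, [0, 1])  # q(x) = x

print(p)  # 3x^2 + 2x + 1
print(p + q)  # 3x^2 + 3x + 1
print(p.derivative())  # 6x + 2
assert p.evaluate(2) == 17  # 3*4 + 2*2 + 1
assert p.integral().evaluate(1) == 3  # x^3 + x^2 + x at x = 1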
643
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Any = XGLMConfig __UpperCamelCase : Dict = {} __UpperCamelCase : List[str] = "gelu" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ): """simple docstring""" UpperCamelCase : Any = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : str = seq_length UpperCamelCase : List[str] = is_training UpperCamelCase : Tuple = use_input_mask UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : int = vocab_size UpperCamelCase : Optional[int] = d_model UpperCamelCase : Any = num_hidden_layers UpperCamelCase : List[str] = num_attention_heads UpperCamelCase : Optional[Any] = ffn_dim UpperCamelCase : Optional[int] = activation_function UpperCamelCase : List[str] = activation_dropout UpperCamelCase : Any = attention_dropout UpperCamelCase : str = max_position_embeddings UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : int = None UpperCamelCase : Dict = 0 UpperCamelCase : int = 2 UpperCamelCase : Any = 1 def _lowercase ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) UpperCamelCase : int = None if self.use_input_mask: UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = self.get_config() UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Dict = config_and_inputs UpperCamelCase : List[str] = { 
'''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCamelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCamelCase : Optional[int] = False __UpperCamelCase : List[Any] = False __UpperCamelCase : List[Any] = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = TFXGLMModelTester(self ) UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def _lowercase ( self ): """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def _lowercase ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @slow def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ): """simple docstring""" UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) UpperCamelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] ) UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = ( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) UpperCamelCase : Tuple = '''left''' # use different length sentences to test batching UpperCamelCase : Any = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = inputs['''input_ids'''] UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 ) UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
643
1
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Optional[Any] ): """simple docstring""" __snake_case = mock.Mock() __snake_case = 500 __snake_case = {} __snake_case = HTTPError __snake_case = {} # Download this model to make sure it's in the cache. __snake_case = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head: __snake_case = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def A ( self : Optional[Any] ): """simple docstring""" __snake_case = mock.Mock() __snake_case = 500 __snake_case = {} __snake_case = HTTPError __snake_case = {} # Download this model to make sure it's in the cache. __snake_case = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head: __snake_case = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def A ( self : Optional[Any] ): """simple docstring""" try: __snake_case = tempfile.mktemp() with open(a_ , "wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , a_ ) __snake_case = AlbertTokenizer.from_pretrained(a_ ) finally: os.remove(a_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" , "wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , a_ ) __snake_case = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("tokenizer.json" ) def A ( self : str ): """simple docstring""" __snake_case = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): __SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def A ( cls : List[Any] ): """simple docstring""" __snake_case = TOKEN HfFolder.save_token(a_ ) @classmethod def A ( cls : List[Any] ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def A ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "vocab.txt" ) with open(a_ , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __snake_case = BertTokenizer(a_ ) tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token ) __snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(a_ , repo_id="test-tokenizer" , push_to_hub=a_ , use_auth_token=self._token ) __snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def A ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "vocab.txt" ) with open(a_ , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __snake_case = BertTokenizer(a_ ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token ) __snake_case = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( a_ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=a_ , use_auth_token=self._token ) __snake_case = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def A ( self : List[str] ): """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "vocab.txt" ) with open(a_ , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __snake_case = CustomTokenizer(a_ ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) __snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with 
tempfile.TemporaryDirectory() as tmp_dir: __snake_case = os.path.join(a_ , "vocab.txt" ) with open(a_ , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) __snake_case = BertTokenizerFast.from_pretrained(a_ ) bert_tokenizer.save_pretrained(a_ ) __snake_case = CustomTokenizerFast.from_pretrained(a_ ) tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token ) __snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" ) __snake_case = AutoTokenizer.from_pretrained( f'''{USER}/test-dynamic-tokenizer''' , use_fast=a_ , trust_remote_code=a_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Optional[int] ): """simple docstring""" __snake_case = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def A ( self : str ): """simple docstring""" __snake_case = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) , ["A", "BC"] ) self.assertEqual(trie.split("BCA" ) , ["BC", "A"] ) def A ( self : List[Any] ): """simple docstring""" __snake_case = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def A ( self : str ): """simple docstring""" __snake_case = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] ) def A ( self : Optional[int] ): """simple docstring""" __snake_case = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) , ["AB", "C"] ) def A ( self : Tuple ): """simple docstring""" __snake_case = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] ) def A ( self : Any ): """simple docstring""" __snake_case = Trie() __snake_case = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(a_ , ["AB", "C"] )
69
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a :str = 637_8137.0 a :Optional[Any] = 635_6752.31_4245 a :List[Any] = 6_378_137 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float: SCREAMING_SNAKE_CASE__ : Dict = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius SCREAMING_SNAKE_CASE__ : Tuple = haversine_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values SCREAMING_SNAKE_CASE__ : List[str] = (b_lata + b_lata) / 2 SCREAMING_SNAKE_CASE__ : Dict = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) SCREAMING_SNAKE_CASE__ : Tuple = (sin(__lowerCAmelCase ) ** 2) * (cos(__lowerCAmelCase ) ** 2) SCREAMING_SNAKE_CASE__ : str = cos(sigma / 2 ) ** 2 SCREAMING_SNAKE_CASE__ : List[str] = (sigma - sin(__lowerCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) SCREAMING_SNAKE_CASE__ : int = (cos(__lowerCAmelCase ) ** 2) * (sin(__lowerCAmelCase ) ** 2) SCREAMING_SNAKE_CASE__ : int = sin(sigma / 2 ) ** 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = (sigma + sin(__lowerCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
680
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
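A minimal sketch (hypothetical, assuming the relative imports resolve inside the transformers package) showing how the attribute_map and the two properties route the generic names onto the encoder-side fields:

config = PegasusConfig()
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16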
716
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
415
0
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
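Note that the script creates a train/test split it never uses. A hedged sketch of scoring the fitted model on the held-out rows (hypothetical addition, reusing the variables above; since the model was fitted on all rows, this estimate is optimistic):

from sklearn.metrics import r2_score

y_pred = pol_reg.predict(poly_reg.fit_transform(X_test))
print(f"R^2 on the held-out set: {r2_score(y_test, y_pred):.3f}")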
21
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") lowerCamelCase = logging.getLogger(__name__) @dataclass class _a : '''simple docstring''' A :Optional[int] = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A :bool = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) A :bool = field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) A :Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) A :Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) A :Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) @dataclass class _a : '''simple docstring''' A :str = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A :str = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Evaluation language. 
Also train language if `train_language` is set to None."} ) A :Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Train language if it is different from the evaluation language."} ) A :Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A :Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A :Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) A :Optional[bool] = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , ) A :bool = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) A :str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) A :bool = field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) A :bool = field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) a__ , a__ , a__ : Optional[int] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_xnli" , __UpperCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a__ : int = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) datasets.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. a__ : Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a__ : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. 
' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: a__ : Any = load_dataset( "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: a__ : List[str] = load_dataset( "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) a__ : List[Any] = train_dataset.features["label"].names if training_args.do_eval: a__ : Tuple = load_dataset( "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) a__ : int = eval_dataset.features["label"].names if training_args.do_predict: a__ : Dict = load_dataset( "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) a__ : Optional[Any] = predict_dataset.features["label"].names # Labels a__ : List[Any] = len(__UpperCamelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a__ : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , idalabel={str(__UpperCamelCase ): label for i, label in enumerate(__UpperCamelCase )} , labelaid={label: i for i, label in enumerate(__UpperCamelCase )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) a__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: a__ : Dict = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch a__ : Optional[Any] = False def preprocess_function(__UpperCamelCase ): # Tokenize the texts return tokenizer( examples["premise"] , examples["hypothesis"] , padding=__UpperCamelCase , max_length=data_args.max_seq_length , truncation=__UpperCamelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: a__ : int = min(len(__UpperCamelCase ) , data_args.max_train_samples 
) a__ : Tuple = train_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): a__ : Optional[int] = train_dataset.map( __UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , ) # Log a few random samples from the training set: for index in random.sample(range(len(__UpperCamelCase ) ) , 3 ): logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' ) if training_args.do_eval: if data_args.max_eval_samples is not None: a__ : Optional[int] = min(len(__UpperCamelCase ) , data_args.max_eval_samples ) a__ : Optional[Any] = eval_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): a__ : List[Any] = eval_dataset.map( __UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: a__ : int = min(len(__UpperCamelCase ) , data_args.max_predict_samples ) a__ : List[str] = predict_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="prediction dataset map pre-processing" ): a__ : Union[str, Any] = predict_dataset.map( __UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , ) # Get the metric function a__ : List[Any] = evaluate.load("xnli" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCamelCase ): a__ : Dict = p.predictions[0] if isinstance(p.predictions , __UpperCamelCase ) else p.predictions a__ : Any = np.argmax(__UpperCamelCase , axis=1 ) return metric.compute(predictions=__UpperCamelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: a__ : List[Any] = default_data_collator elif training_args.fpaa: a__ : List[Any] = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) else: a__ : Tuple = None # Initialize our Trainer a__ : List[str] = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: a__ : str = None if training_args.resume_from_checkpoint is not None: a__ : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: a__ : Optional[int] = last_checkpoint a__ : Any = trainer.train(resume_from_checkpoint=__UpperCamelCase ) a__ : Tuple = train_result.metrics a__ : Union[str, Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCamelCase ) ) a__ : Optional[int] = min(__UpperCamelCase , len(__UpperCamelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , __UpperCamelCase ) trainer.save_metrics("train" , __UpperCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Optional[int] = trainer.evaluate(eval_dataset=__UpperCamelCase ) a__ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCamelCase ) a__ : List[Any] = min(__UpperCamelCase , len(__UpperCamelCase ) ) trainer.log_metrics("eval" , __UpperCamelCase ) trainer.save_metrics("eval" , __UpperCamelCase ) # Prediction if training_args.do_predict: logger.info("*** Predict ***" ) a__ , a__ , a__ : int = trainer.predict(__UpperCamelCase , metric_key_prefix="predict" ) a__ : int = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__UpperCamelCase ) ) a__ : Union[str, Any] = min(__UpperCamelCase , len(__UpperCamelCase ) ) trainer.log_metrics("predict" , __UpperCamelCase ) trainer.save_metrics("predict" , __UpperCamelCase ) a__ : Dict = np.argmax(__UpperCamelCase , axis=1 ) a__ : str = os.path.join(training_args.output_dir , "predictions.txt" ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , "w" ) as writer: writer.write("index\tprediction\n" ) for index, item in enumerate(__UpperCamelCase ): a__ : int = label_list[item] writer.write(F'{index}\t{item}\n' ) if __name__ == "__main__": main()
191
0
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
702
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
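Illustrative shell usage of the entry point above (the exact subcommand flags come from the respective *Command classes, not from this file, so treat them as assumptions):

# datasets-cli env
# datasets-cli test ./datasets/my_dataset --save_infos --all_configs
#
# Any extra `--key value` pairs left over after the subcommand parser runs are
# folded into a dict by parse_unknown_args and forwarded to the command.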
59
0
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np a :Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE) a :List[str] = None def _lowercase ( ) -> List[str]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=UpperCAmelCase_ , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=UpperCAmelCase_ , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE__ : Optional[int] = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def _lowercase ( __lowerCAmelCase ) -> Optional[Any]: def remove_articles(__lowerCAmelCase ): return ARTICLES_REGEX.sub(""" """ , UpperCAmelCase_ ) def white_space_fix(__lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase_ ) ) ) ) def _lowercase ( __lowerCAmelCase ) -> Tuple: if not s: return [] return normalize_answer(UpperCAmelCase_ ).split() def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: return int(normalize_answer(UpperCAmelCase_ ) == normalize_answer(UpperCAmelCase_ ) ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] = get_tokens(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : int = get_tokens(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = collections.Counter(UpperCAmelCase_ ) & collections.Counter(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(common.values() ) if len(UpperCAmelCase_ ) == 0 or len(UpperCAmelCase_ ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE__ : Tuple = 1.0 * num_same / len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = 1.0 * num_same / len(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = (2 * precision * recall) / (precision + recall) return fa def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int = {} SCREAMING_SNAKE_CASE__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE__ : Any = qa['''id'''] SCREAMING_SNAKE_CASE__ : Tuple = [t for t in qa['''answers''']['''text'''] if 
normalize_answer(UpperCAmelCase_ )] if not gold_answers: # For unanswerable questions, only correct answer is empty string SCREAMING_SNAKE_CASE__ : Any = [''''''] if qid not in preds: print(F'''Missing prediction for {qid}''' ) continue SCREAMING_SNAKE_CASE__ : Tuple = preds[qid] # Take max over all gold answers SCREAMING_SNAKE_CASE__ : List[Any] = max(compute_exact(UpperCAmelCase_ , UpperCAmelCase_ ) for a in gold_answers ) SCREAMING_SNAKE_CASE__ : Optional[Any] = max(compute_fa(UpperCAmelCase_ , UpperCAmelCase_ ) for a in gold_answers ) return exact_scores, fa_scores def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : int = {} for qid, s in scores.items(): SCREAMING_SNAKE_CASE__ : Optional[int] = na_probs[qid] > na_prob_thresh if pred_na: SCREAMING_SNAKE_CASE__ : Tuple = float(not qid_to_has_ans[qid] ) else: SCREAMING_SNAKE_CASE__ : int = s return new_scores def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> int: if not qid_list: SCREAMING_SNAKE_CASE__ : int = len(UpperCAmelCase_ ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores.values() ) / total), ("""f1""", 100.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: SCREAMING_SNAKE_CASE__ : Any = len(UpperCAmelCase_ ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: for k in new_eval: SCREAMING_SNAKE_CASE__ : Tuple = new_eval[k] def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: plt.step(UpperCAmelCase_ , UpperCAmelCase_ , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(UpperCAmelCase_ , UpperCAmelCase_ , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(UpperCAmelCase_ ) plt.savefig(UpperCAmelCase_ ) plt.clf() def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> int: SCREAMING_SNAKE_CASE__ : List[Any] = sorted(UpperCAmelCase_ , key=lambda __lowerCAmelCase : na_probs[k] ) SCREAMING_SNAKE_CASE__ : Dict = 0.0 SCREAMING_SNAKE_CASE__ : Optional[Any] = 1.0 SCREAMING_SNAKE_CASE__ : List[Any] = 0.0 SCREAMING_SNAKE_CASE__ : Tuple = [1.0] SCREAMING_SNAKE_CASE__ : List[str] = [0.0] SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0 for i, qid in enumerate(UpperCAmelCase_ ): if qid_to_has_ans[qid]: true_pos += scores[qid] SCREAMING_SNAKE_CASE__ : List[str] = true_pos / float(i + 1 ) SCREAMING_SNAKE_CASE__ : Tuple = true_pos / float(UpperCAmelCase_ ) if i == len(UpperCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(UpperCAmelCase_ ) recalls.append(UpperCAmelCase_ ) if out_image: plot_pr_curve(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return {"ap": 100.0 * avg_prec} def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: if out_image_dir and not os.path.exists(UpperCAmelCase_ ): os.makedirs(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = sum(1 for v in 
qid_to_has_ans.values() if v ) if num_true_pos == 0: return SCREAMING_SNAKE_CASE__ : str = make_precision_recall_eval( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) SCREAMING_SNAKE_CASE__ : List[str] = make_precision_recall_eval( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {k: float(UpperCAmelCase_ ) for k, v in qid_to_has_ans.items()} SCREAMING_SNAKE_CASE__ : Optional[int] = make_precision_recall_eval( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , ) merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , """pr_exact""" ) merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , """pr_f1""" ) merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , """pr_oracle""" ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: if not qid_list: return SCREAMING_SNAKE_CASE__ : Tuple = [na_probs[k] for k in qid_list] SCREAMING_SNAKE_CASE__ : str = np.ones_like(UpperCAmelCase_ ) / float(len(UpperCAmelCase_ ) ) plt.hist(UpperCAmelCase_ , weights=UpperCAmelCase_ , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(F'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(UpperCAmelCase_ , F'''na_prob_hist_{name}.png''' ) ) plt.clf() def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) SCREAMING_SNAKE_CASE__ : Optional[int] = num_no_ans SCREAMING_SNAKE_CASE__ : List[Any] = cur_score SCREAMING_SNAKE_CASE__ : Any = 0.0 SCREAMING_SNAKE_CASE__ : int = sorted(UpperCAmelCase_ , key=lambda __lowerCAmelCase : na_probs[k] ) for i, qid in enumerate(UpperCAmelCase_ ): if qid not in scores: continue if qid_to_has_ans[qid]: SCREAMING_SNAKE_CASE__ : List[str] = scores[qid] else: if preds[qid]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = -1 else: SCREAMING_SNAKE_CASE__ : List[Any] = 0 cur_score += diff if cur_score > best_score: SCREAMING_SNAKE_CASE__ : Tuple = cur_score SCREAMING_SNAKE_CASE__ : Tuple = na_probs[qid] return 100.0 * best_score / len(UpperCAmelCase_ ), best_thresh def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Any = find_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = find_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = best_exact SCREAMING_SNAKE_CASE__ : Optional[Any] = exact_thresh SCREAMING_SNAKE_CASE__ : Union[str, Any] = best_fa SCREAMING_SNAKE_CASE__ : Tuple = fa_thresh def _lowercase ( ) -> Tuple: with open(OPTS.data_file ) as f: SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : int = dataset_json['''data'''] with open(OPTS.pred_file ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] = json.load(UpperCAmelCase_ ) if OPTS.na_prob_file: with 
open(OPTS.na_prob_file ) as f: SCREAMING_SNAKE_CASE__ : Dict = json.load(UpperCAmelCase_ ) else: SCREAMING_SNAKE_CASE__ : str = {k: 0.0 for k in preds} SCREAMING_SNAKE_CASE__ : Optional[int] = make_qid_to_has_ans(UpperCAmelCase_ ) # maps qid to True/False SCREAMING_SNAKE_CASE__ : int = [k for k, v in qid_to_has_ans.items() if v] SCREAMING_SNAKE_CASE__ : str = [k for k, v in qid_to_has_ans.items() if not v] SCREAMING_SNAKE_CASE__ : Any = get_raw_scores(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = apply_no_ans_threshold(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.na_prob_thresh ) SCREAMING_SNAKE_CASE__ : int = apply_no_ans_threshold(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.na_prob_thresh ) SCREAMING_SNAKE_CASE__ : int = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ ) if has_ans_qids: SCREAMING_SNAKE_CASE__ : List[Any] = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ , qid_list=UpperCAmelCase_ ) merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , """HasAns""" ) if no_ans_qids: SCREAMING_SNAKE_CASE__ : List[str] = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ , qid_list=UpperCAmelCase_ ) merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir ) histogram_na_prob(UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) else: print(json.dumps(UpperCAmelCase_ , indent=2 ) ) if __name__ == "__main__": a :List[Any] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
680
import argparse import json from tqdm import tqdm def UpperCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--src_path''' , type=UpperCAmelCase_ , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , ) parser.add_argument( '''--evaluation_set''' , type=UpperCAmelCase_ , help='''where to store parsed evaluation_set file''' , ) parser.add_argument( '''--gold_data_path''' , type=UpperCAmelCase_ , help='''where to store parsed gold_data_path file''' , ) _lowercase : Dict = parser.parse_args() with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open( args.gold_data_path , '''w''' ) as gold_file: _lowercase : str = json.load(UpperCAmelCase_ ) for dpr_record in tqdm(UpperCAmelCase_ ): _lowercase : Optional[Any] = dpr_record['''question'''] _lowercase : Union[str, Any] = [context['''title'''] for context in dpr_record['''positive_ctxs''']] eval_file.write(question + '''\n''' ) gold_file.write('''\t'''.join(UpperCAmelCase_ ) + '''\n''' ) if __name__ == "__main__": main()
322
0
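The record above ends with SQuAD-v2-style scoring that applies a no-answer probability threshold. A minimal sketch of that thresholding step with illustrative names (the record's own identifiers are mangled):

def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    # When the model's no-answer probability exceeds the threshold, the
    # question scores 1 only if it truly has no answer, otherwise 0.
    new_scores = {}
    for qid, score in scores.items():
        if na_probs[qid] > na_prob_thresh:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = score
    return new_scores

# Toy usage: q1 is answerable but predicted unanswerable, q2 keeps its score.
print(apply_no_ans_threshold(
    scores={"q1": 1.0, "q2": 1.0},
    na_probs={"q1": 0.9, "q2": 0.1},
    qid_to_has_ans={"q1": True, "q2": False},
    na_prob_thresh=0.5,
))  # {'q1': 0.0, 'q2': 1.0}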
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor __lowerCamelCase = logging.get_logger(__name__) class _lowercase ( __UpperCAmelCase ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ): warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
190
"""simple docstring""" from __future__ import annotations from random import choice def lowercase ( __UpperCamelCase ) -> Any: return choice(__UpperCamelCase ) def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> int: __magic_name__ = random_pivot(__UpperCamelCase ) # partition based on pivot # linear time __magic_name__ = [e for e in lst if e < pivot] __magic_name__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(__UpperCamelCase ) == k - 1: return pivot # pivot is in elements bigger than k elif len(__UpperCamelCase ) < k - 1: return kth_number(__UpperCamelCase , k - len(__UpperCamelCase ) - 1 ) # pivot is in elements smaller than k else: return kth_number(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
190
1
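The second snippet in the record above is a randomized quickselect. Because the mangled identifiers make the recursive arguments ambiguous, this sketch assumes the standard partition-based recursion; elements equal to the pivot are dropped, as in the snippet:

from random import choice

def kth_number(lst, k):
    # Partition around a random pivot, then recurse into the side that
    # must contain the k-th smallest element (k is 1-indexed).
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    else:
        return kth_number(small, k)

print(kth_number([2, 1, 3, 4, 5], 3))  # 3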
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP snake_case__ : Any = False try: snake_case__ : Dict = _is_package_available("""google.colab""") except ModuleNotFoundError: pass @input.register class _A : '''simple docstring''' def __init__( self : List[str] , lowerCamelCase : str = None , lowerCamelCase : list = [] ): '''simple docstring''' __lowercase = 0 __lowercase = choices __lowercase = prompt if sys.platform == "win32": __lowercase = "*" else: __lowercase = "➔ " def _snake_case ( self : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : str = "" ): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , lowerCamelCase ) else: forceWrite(self.choices[index] , lowerCamelCase ) def _snake_case ( self : Any , lowerCamelCase : int ): '''simple docstring''' if index == self.position: forceWrite(f""" {self.arrow_char} """ ) self.write_choice(lowerCamelCase ) else: forceWrite(f""" {self.choices[index]}""" ) reset_cursor() def _snake_case ( self : int , lowerCamelCase : Direction , lowerCamelCase : int = 1 ): '''simple docstring''' __lowercase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(lowerCamelCase ) move_cursor(lowerCamelCase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def _snake_case ( self : int ): '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def _snake_case ( self : Optional[int] ): '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def _snake_case ( self : Union[str, Any] ): '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(lowerCamelCase )] for number in range(10 )] ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowercase = int(chr(self.current_selection ) ) __lowercase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , lowerCamelCase ) else: return else: return def _snake_case ( self : str , lowerCamelCase : int = 0 ): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) __lowercase = default_choice for i in range(len(self.choices ) ): self.print_choice(lowerCamelCase ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: __lowercase = int(builtins.input() ) except ValueError: __lowercase = default_choice else: __lowercase = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(lowerCamelCase , "\n" ) return 
choice
402
from math import sqrt def snake_case_ ( _SCREAMING_SNAKE_CASE ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( _SCREAMING_SNAKE_CASE = 1_0_0_0_1 ): __lowercase = 0 __lowercase = 1 while count != nth and number < 3: number += 1 if is_prime(_SCREAMING_SNAKE_CASE ): count += 1 while count != nth: number += 2 if is_prime(_SCREAMING_SNAKE_CASE ): count += 1 return number if __name__ == "__main__": print(F'''{solution() = }''')
402
1
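The prime-search snippet in the record above relies on a 6k +/- 1 trial-division test. A short standalone sketch with readable names:

from math import sqrt

def is_prime(number):
    # Every prime greater than 3 has the form 6k +/- 1, so after
    # screening multiples of 2 and 3 only divisors 5, 7, 11, 13, ... remain.
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]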
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( lowercase , lowercase ) -> int: '''simple docstring''' UpperCamelCase = args.log_outputs UpperCamelCase = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric UpperCamelCase = load_metric('wer' ) UpperCamelCase = load_metric('cer' ) # compute metrics UpperCamelCase = wer.compute(references=result['target'] , predictions=result['prediction'] ) UpperCamelCase = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results UpperCamelCase = f'''WER: {wer_result}\nCER: {cer_result}''' print(lowercase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(lowercase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCamelCase = f'''log_{dataset_id}_predictions.txt''' UpperCamelCase = f'''log_{dataset_id}_targets.txt''' with open(lowercase , 'w' ) as p, open(lowercase , 'w' ) as t: # mapping function to write output def write_to_file(lowercase , lowercase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(lowercase , with_indices=lowercase ) def A ( lowercase ) -> str: '''simple docstring''' UpperCamelCase = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCamelCase = re.sub(lowercase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! UpperCamelCase = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: UpperCamelCase = ' '.join(text.split(lowercase ) ) return text def A ( lowercase ) -> Dict: '''simple docstring''' UpperCamelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCamelCase = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCamelCase = feature_extractor.sampling_rate # resample audio UpperCamelCase = dataset.cast_column('audio' , Audio(sampling_rate=lowercase ) ) # load eval pipeline if args.device is None: UpperCamelCase = 0 if torch.cuda.is_available() else -1 UpperCamelCase = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase ): UpperCamelCase = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCamelCase = prediction['text'] UpperCamelCase = normalize_text(batch['sentence'] ) return batch # run inference on all examples UpperCamelCase = dataset.map(lowercase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase , lowercase ) if __name__ == "__main__": _UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. 
*E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) _UpperCAmelCase : Any = parser.parse_args() main(args)
3
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : Dict = "data2vec-text" def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any: """simple docstring""" super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = position_embedding_type UpperCamelCase = use_cache UpperCamelCase = classifier_dropout class lowercase ( _SCREAMING_SNAKE_CASE ): @property def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
3
1
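The eval script in the record above normalizes target text before computing WER/CER. A minimal sketch of that normalization (the ignored-character class is copied from the snippet; the snippet's loop over newline and space sequences is simplified here to a single whitespace collapse):

import re

# Character class copied from the snippet above (chars ignored during training).
CHARS_TO_IGNORE = '[,?.!\\-\\;\\:"“%‘”�—’…–]'

def normalize_text(text):
    # Lowercase, strip ignored punctuation, then collapse all whitespace.
    text = re.sub(CHARS_TO_IGNORE, "", text.lower())
    return " ".join(text.split())

print(normalize_text("Hello, World!\nThis  is a test."))  # hello world this is a test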
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ): __a = XLMTokenizer __a = False def UpperCamelCase_ ( self ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__: List[Any]= [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] SCREAMING_SNAKE_CASE__: Tuple= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase ) ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Any: SCREAMING_SNAKE_CASE__: Optional[Any]= '''lower newer''' SCREAMING_SNAKE_CASE__: Any= '''lower newer''' return input_text, output_text def UpperCamelCase_ ( self ) -> Optional[int]: SCREAMING_SNAKE_CASE__: Optional[int]= XLMTokenizer(self.vocab_file , self.merges_file ) SCREAMING_SNAKE_CASE__: Tuple= '''lower''' SCREAMING_SNAKE_CASE__: Optional[Any]= ['''low''', '''er</w>'''] SCREAMING_SNAKE_CASE__: Any= tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= tokens + ['''<unk>'''] SCREAMING_SNAKE_CASE__: str= [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase ) @slow def UpperCamelCase_ ( self ) -> Optional[int]: SCREAMING_SNAKE_CASE__: Optional[int]= XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) SCREAMING_SNAKE_CASE__: Tuple= tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: str= tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: str= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
64
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase : Optional[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ UpperCAmelCase : Any = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ UpperCAmelCase : Union[str, Any] = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def _A ( SCREAMING_SNAKE_CASE : str ): """simple docstring""" def remove_articles(SCREAMING_SNAKE_CASE : Optional[Any] ): a__ : Any =re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(SCREAMING_SNAKE_CASE , " " , SCREAMING_SNAKE_CASE ) def white_space_fix(SCREAMING_SNAKE_CASE : Optional[Any] ): return " ".join(text.split() ) def remove_punc(SCREAMING_SNAKE_CASE : List[Any] ): a__ : List[Any] =set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(SCREAMING_SNAKE_CASE : Union[str, Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) ) def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" return int(normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE ) ) def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" a__ : Any =[any(compute_exact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )] return (sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )) * 100 def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" a__ : str =[rgram for rgrams in rgramslist for rgram in 
rgrams] a__ : List[str] =Counter(SCREAMING_SNAKE_CASE ) a__ : List[Any] =Counter(SCREAMING_SNAKE_CASE ) a__ : Any =Counter() for sgram, scount in sgramcounter.items(): a__ : List[str] =scount * numref a__ : List[str] =Counter(SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =Counter() for cgram, ccount in cgramcounter.items(): a__ : int =ccount * numref # KEEP a__ : Any =sgramcounter_rep & cgramcounter_rep a__ : List[str] =keepgramcounter_rep & rgramcounter a__ : str =sgramcounter_rep & rgramcounter a__ : Optional[int] =0 a__ : List[Any] =0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. a__ : Tuple =1 a__ : Dict =1 if len(SCREAMING_SNAKE_CASE ) > 0: a__ : List[str] =keeptmpscorea / len(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) a__ : int =keeptmpscorea / sum(keepgramcounterall_rep.values() ) a__ : Tuple =0 if keepscore_precision > 0 or keepscore_recall > 0: a__ : Any =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION a__ : Optional[Any] =sgramcounter_rep - cgramcounter_rep a__ : Optional[Any] =delgramcounter_rep - rgramcounter a__ : Optional[int] =sgramcounter_rep - rgramcounter a__ : int =0 a__ : Dict =0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. a__ : Any =1 if len(SCREAMING_SNAKE_CASE ) > 0: a__ : Optional[Any] =deltmpscorea / len(SCREAMING_SNAKE_CASE ) # ADDITION a__ : Union[str, Any] =set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) a__ : List[Any] =set(SCREAMING_SNAKE_CASE ) & set(SCREAMING_SNAKE_CASE ) a__ : Tuple =set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) a__ : Any =0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
a__ : int =1 a__ : Dict =1 if len(SCREAMING_SNAKE_CASE ) > 0: a__ : Optional[int] =addtmpscore / len(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: a__ : List[str] =addtmpscore / len(SCREAMING_SNAKE_CASE ) a__ : List[str] =0 if addscore_precision > 0 or addscore_recall > 0: a__ : Optional[Any] =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" a__ : int =len(SCREAMING_SNAKE_CASE ) a__ : Tuple =ssent.split(" " ) a__ : str =csent.split(" " ) a__ : List[Any] =[] a__ : int =[] a__ : List[Any] =[] a__ : Any =[] a__ : List[Any] =[] a__ : Any =[] a__ : Union[str, Any] =[] a__ : Union[str, Any] =[] a__ : Union[str, Any] =[] a__ : Tuple =[] for rsent in rsents: a__ : Optional[int] =rsent.split(" " ) a__ : Tuple =[] a__ : Tuple =[] a__ : int =[] ragramslist.append(SCREAMING_SNAKE_CASE ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ): if i < len(SCREAMING_SNAKE_CASE ) - 1: a__ : Union[str, Any] =ragrams[i] + " " + ragrams[i + 1] ragrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 2: a__ : Optional[int] =ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 3: a__ : List[Any] =ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(SCREAMING_SNAKE_CASE ) ragramslist.append(SCREAMING_SNAKE_CASE ) ragramslist.append(SCREAMING_SNAKE_CASE ) ragramslist.append(SCREAMING_SNAKE_CASE ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ): if i < len(SCREAMING_SNAKE_CASE ) - 1: a__ : str =sagrams[i] + " " + sagrams[i + 1] sagrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 2: a__ : Dict =sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 3: a__ : Any =sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(SCREAMING_SNAKE_CASE ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ): if i < len(SCREAMING_SNAKE_CASE ) - 1: a__ : List[Any] =cagrams[i] + " " + cagrams[i + 1] cagrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 2: a__ : List[str] =cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(SCREAMING_SNAKE_CASE ) if i < len(SCREAMING_SNAKE_CASE ) - 3: a__ : Optional[int] =cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(SCREAMING_SNAKE_CASE ) ((a__) , (a__) , (a__)) : Optional[Any] =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ((a__) , (a__) , (a__)) : Dict =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ((a__) , (a__) , (a__)) : List[str] =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ((a__) , (a__) , (a__)) : Dict =SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ : Tuple =sum([keepascore, keepascore, keepascore, keepascore] ) / 4 a__ : Tuple =sum([delascore, delascore, delascore, delascore] ) / 4 a__ : int =sum([addascore, addascore, addascore, addascore] ) / 4 a__ : Optional[int] =(avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def _A ( SCREAMING_SNAKE_CASE : List[str] , 
SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "13a" , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" if lowercase: a__ : Optional[int] =sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: a__ : Any =sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE )()(SCREAMING_SNAKE_CASE ) else: a__ : Any =sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": a__ : Dict =sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE , escape=SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": a__ : Optional[int] =sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE ) else: a__ : Dict =sentence if not return_str: a__ : List[Any] =normalized_sent.split() return normalized_sent def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" if not (len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )): raise ValueError("Sources length must match predictions and references lengths." ) a__ : Dict =0 for src, pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE ) , normalize(SCREAMING_SNAKE_CASE ) , [normalize(SCREAMING_SNAKE_CASE ) for sent in refs] ) a__ : Tuple =sari_score / len(SCREAMING_SNAKE_CASE ) return 100 * sari_score def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple="exp" , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=False , ): """simple docstring""" a__ : int =len(references[0] ) if any(len(SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) a__ : List[str] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE )] a__ : Optional[int] =sacrebleu.corpus_bleu( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , smooth_method=SCREAMING_SNAKE_CASE , smooth_value=SCREAMING_SNAKE_CASE , force=SCREAMING_SNAKE_CASE , lowercase=SCREAMING_SNAKE_CASE , use_effective_order=SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __lowerCAmelCase ( datasets.Metric): def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> 
Optional[Any]: '''simple docstring''' a__ : List[Any] ={} result.update({"sari": compute_sari(sources=lowerCAmelCase__ , predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} ) result.update({"exact": compute_em(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )} ) return result
563
0
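The wiki_split metric in the record above scores exact matches after SQuAD-style answer normalization. A compact de-obfuscated sketch of that pipeline:

import re
import string

def normalize_answer(text):
    # Lowercase, drop punctuation, drop articles, collapse whitespace:
    # the same four steps as the metric's normalize_answer.
    text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def compute_em(predictions, references):
    # A prediction scores 1 if it exactly matches any reference after
    # normalization; the result is a percentage.
    matches = [
        any(normalize_answer(pred) == normalize_answer(ref) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return 100 * sum(matches) / len(matches)

print(compute_em(["The cat sat."], [["cat sat", "a dog ran"]]))  # 100.0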
'''simple docstring''' from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata __a: int = '' if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''): class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ): '''simple docstring''' def __init__( self : Dict , lowerCamelCase : int = " " ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = sentence_delimiter def lowerCamelCase ( self : List[Any] , lowerCamelCase : Any ) -> List[Any]: """simple docstring""" return list(lowerCamelCase ) def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Dict ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = [] for sent_idx, sentence in enumerate(lowerCamelCase ): chars.extend(self.process_string(lowerCamelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase ) - 1: chars.append(self.sentence_delimiter ) return chars __a: Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __a: List[Any] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __a: Dict = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __a: Optional[Any] = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n' __a: int = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): '''simple docstring''' def lowerCamelCase ( self : Dict ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""", ] , ) def lowerCamelCase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple=False ) -> Any: """simple docstring""" if concatenate_texts: return jiwer.compute_measures( lowerCamelCase , lowerCamelCase , truth_transform=lowerCamelCase , hypothesis_transform=lowerCamelCase , )["wer"] _UpperCAmelCase = 0 _UpperCAmelCase = 0 for prediction, reference in zip(lowerCamelCase , lowerCamelCase ): _UpperCAmelCase = jiwer.compute_measures( lowerCamelCase , lowerCamelCase , truth_transform=lowerCamelCase , hypothesis_transform=lowerCamelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
719
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class SCREAMING_SNAKE_CASE__ : '''simple docstring''' _lowerCamelCase = BlenderbotSmallConfig _lowerCamelCase = {} _lowerCamelCase = '''gelu''' def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=13 , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Dict=True , lowerCamelCase : List[str]=False , lowerCamelCase : List[Any]=99 , lowerCamelCase : Tuple=32 , lowerCamelCase : List[str]=2 , lowerCamelCase : Tuple=4 , lowerCamelCase : List[Any]=37 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Optional[int]=20 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=1 , lowerCamelCase : int=0 , ) -> Dict: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def lowerCamelCase ( self : Any ) -> int: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_blenderbot_small_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return config, inputs_dict def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> int: """simple docstring""" _UpperCAmelCase = TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder() _UpperCAmelCase = inputs_dict["""input_ids"""] _UpperCAmelCase = input_ids[:1, :] _UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :] _UpperCAmelCase = inputs_dict["""head_mask"""] _UpperCAmelCase = 1 # first forward pass _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , 
head_mask=lowerCamelCase , use_cache=lowerCamelCase ) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0] _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] _UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase , lowerCamelCase , rtol=1E-3 ) def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , ) -> int: if attention_mask is None: _UpperCAmelCase = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False def lowerCamelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" _UpperCAmelCase = TFBlenderbotSmallModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase ) def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase ) 
@require_tokenizers @require_tf class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] _lowerCamelCase = '''facebook/blenderbot_small-90M''' @cached_property def lowerCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def lowerCamelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowerCamelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.tokenizer(self.src_text , return_tensors="""tf""" ) _UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCamelCase , ) _UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
402
0
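The CER metric in the record above delegates edit-distance computation to jiwer. As a dependency-free illustration of the quantity it reports, CER = (S + D + I) / N, here is a plain Levenshtein-based sketch (not the record's implementation):

def levenshtein(ref, hyp):
    # Classic dynamic-programming edit distance over characters.
    prev = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        curr = [i]
        for j, h in enumerate(hyp, start=1):
            curr.append(min(
                prev[j] + 1,             # deletion from ref
                curr[j - 1] + 1,         # insertion into ref
                prev[j - 1] + (r != h),  # substitution (0 if match)
            ))
        prev = curr
    return prev[-1]

def char_error_rate(predictions, references):
    # CER = (S + D + I) / N pooled over all pairs; with unit costs,
    # S + D + I equals the Levenshtein distance.
    errors = sum(levenshtein(ref, pred) for pred, ref in zip(predictions, references))
    total = sum(len(ref) for ref in references)
    return errors / total

print(char_error_rate(
    ["this is the prediction", "there is an other sample"],
    ["this is the reference", "there is another one"],
))  # 0.34146341463414637, matching the docstring example above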
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
231
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def lowercase ( __snake_case : Callable[[int | float], int | float] , __snake_case : int | float , __snake_case : int | float , __snake_case : int = 1_0_0 , ): lowercase_ : Any = x_start lowercase_ : Optional[Any] = fnc(__snake_case ) lowercase_ : Any = 0.0 for _ in range(__snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowercase_ : str = (x_end - x_start) / steps + xa lowercase_ : Dict = fnc(__snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowercase_ : Optional[int] = xa lowercase_ : Union[str, Any] = fxa return length if __name__ == "__main__": def lowercase ( __snake_case : str ): return math.sin(1_0 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') __A : Optional[Any] = 10 while i <= 100_000: print(F"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
231
1
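The arc-length snippet in the record above approximates a curve by straight segments. A short sketch of the same algorithm with readable names and a sanity check:

import math

def line_length(fnc, x_start, x_end, steps=100):
    # Approximate the curve by `steps` straight segments and sum their
    # Euclidean lengths.
    x1, fx1 = x_start, fnc(x_start)
    length = 0.0
    for _ in range(steps):
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        x1, fx1 = x2, fx2
    return length

# Sanity check: the straight line y = x from 0 to 1 has length sqrt(2).
print(line_length(lambda x: x, 0, 1))  # ~1.41421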
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class SCREAMING_SNAKE_CASE_ ( snake_case__ ): """simple docstring""" __snake_case : str = """sew""" def __init__( self :Any , __lowercase :int=32 , __lowercase :Union[str, Any]=768 , __lowercase :List[str]=12 , __lowercase :Tuple=12 , __lowercase :List[Any]=3072 , __lowercase :Optional[Any]=2 , __lowercase :Tuple="gelu" , __lowercase :Tuple=0.1 , __lowercase :List[str]=0.1 , __lowercase :int=0.1 , __lowercase :Optional[Any]=0.0 , __lowercase :str=0.1 , __lowercase :Tuple=0.1 , __lowercase :Union[str, Any]=0.02 , __lowercase :Optional[int]=1e-5 , __lowercase :Union[str, Any]="group" , __lowercase :List[Any]="gelu" , __lowercase :int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowercase :Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowercase :Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowercase :Tuple=False , __lowercase :Optional[Any]=128 , __lowercase :List[str]=16 , __lowercase :Any=True , __lowercase :Tuple=0.05 , __lowercase :Any=10 , __lowercase :Optional[Any]=2 , __lowercase :Optional[Any]=0.0 , __lowercase :Any=10 , __lowercase :Optional[Any]=0 , __lowercase :Any="mean" , __lowercase :List[Any]=False , __lowercase :List[Any]=False , __lowercase :Dict=256 , __lowercase :Optional[int]=0 , __lowercase :Optional[Any]=1 , __lowercase :List[str]=2 , **__lowercase :List[str] , ): super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase ) __lowerCamelCase : Dict =hidden_size __lowerCamelCase : Any =feat_extract_norm __lowerCamelCase : str =feat_extract_activation __lowerCamelCase : Dict =list(__lowercase ) __lowerCamelCase : Tuple =list(__lowercase ) __lowerCamelCase : int =list(__lowercase ) __lowerCamelCase : Optional[Any] =conv_bias __lowerCamelCase : Tuple =num_conv_pos_embeddings __lowerCamelCase : Tuple =num_conv_pos_embedding_groups __lowerCamelCase : str =len(self.conv_dim ) __lowerCamelCase : str =num_hidden_layers __lowerCamelCase : Optional[Any] =intermediate_size __lowerCamelCase : str =squeeze_factor __lowerCamelCase : Dict =hidden_act __lowerCamelCase : List[str] =num_attention_heads __lowerCamelCase : List[Any] =hidden_dropout __lowerCamelCase : str =attention_dropout __lowerCamelCase : Tuple =activation_dropout __lowerCamelCase : int =feat_proj_dropout __lowerCamelCase : List[str] =final_dropout __lowerCamelCase : int =layerdrop __lowerCamelCase : str =layer_norm_eps __lowerCamelCase : List[Any] =initializer_range __lowerCamelCase : Dict =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase : List[str] =apply_spec_augment __lowerCamelCase : str =mask_time_prob __lowerCamelCase : Optional[Any] =mask_time_length __lowerCamelCase : List[str] =mask_time_min_masks __lowerCamelCase : Optional[Any] =mask_feature_prob __lowerCamelCase : Any =mask_feature_length __lowerCamelCase : Tuple =mask_feature_min_masks # ctc loss __lowerCamelCase : int =ctc_loss_reduction __lowerCamelCase : Tuple =ctc_zero_infinity # sequence classification __lowerCamelCase : Tuple =use_weighted_layer_sum __lowerCamelCase : Dict =classifier_proj_size @property def __lowercase ( self :Dict ): return functools.reduce(operator.mul , self.conv_stride , 1 )
363
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE_ ( snake_case__ ): """simple docstring""" __snake_case : Optional[Any] = (UnCLIPScheduler,) def __lowercase ( self :Any , **__lowercase :str ): __lowerCamelCase : Optional[int] ={ '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**__lowercase ) return config def __lowercase ( self :Tuple ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__lowercase ) def __lowercase ( self :Tuple ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__lowercase ) def __lowercase ( self :List[str] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowercase ) def __lowercase ( self :List[Any] ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=__lowercase ) def __lowercase ( self :Any ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__lowercase ) def __lowercase ( self :Union[str, Any] ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__lowercase , prev_timestep=__lowercase ) def __lowercase ( self :Union[str, Any] ): __lowerCamelCase : List[Any] =self.scheduler_classes[0] __lowerCamelCase : str =self.get_scheduler_config(variance_type='''fixed_small_log''' ) __lowerCamelCase : Tuple =scheduler_class(**__lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5 def __lowercase ( self :Optional[Any] ): __lowerCamelCase : Tuple =self.scheduler_classes[0] __lowerCamelCase : Optional[Any] =self.get_scheduler_config(variance_type='''learned_range''' ) __lowerCamelCase : Optional[Any] =scheduler_class(**__lowercase ) __lowerCamelCase : Optional[int] =0.5 assert scheduler._get_variance(1 , predicted_variance=__lowercase ) - -10.1712790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=__lowercase ) - -5.7998052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=__lowercase ) - -0.0010011 < 1e-5 def __lowercase ( self :Optional[Any] ): __lowerCamelCase : Any =self.scheduler_classes[0] __lowerCamelCase : Optional[int] =self.get_scheduler_config() __lowerCamelCase : str =scheduler_class(**__lowercase ) __lowerCamelCase : Tuple =scheduler.timesteps __lowerCamelCase : Any =self.dummy_model() __lowerCamelCase : Any =self.dummy_sample_deter __lowerCamelCase : List[Any] =torch.manual_seed(0 ) for i, t in enumerate(__lowercase ): # 1. predict noise residual __lowerCamelCase : Tuple =model(__lowercase , __lowercase ) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase : List[Any] =scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase ).prev_sample __lowerCamelCase : List[Any] =pred_prev_sample __lowerCamelCase : Optional[Any] =torch.sum(torch.abs(__lowercase ) ) __lowerCamelCase : Optional[int] =torch.mean(torch.abs(__lowercase ) ) assert abs(result_sum.item() - 252.2682495 ) < 1e-2 assert abs(result_mean.item() - 0.3284743 ) < 1e-3 def __lowercase ( self :int ): __lowerCamelCase : Optional[Any] =self.scheduler_classes[0] __lowerCamelCase : Any =self.get_scheduler_config() __lowerCamelCase : Optional[Any] =scheduler_class(**__lowercase ) scheduler.set_timesteps(25 ) __lowerCamelCase : Dict =scheduler.timesteps __lowerCamelCase : List[str] =self.dummy_model() __lowerCamelCase : List[str] =self.dummy_sample_deter __lowerCamelCase : Any =torch.manual_seed(0 ) for i, t in enumerate(__lowercase ): # 1. predict noise residual __lowerCamelCase : Optional[Any] =model(__lowercase , __lowercase ) if i + 1 == timesteps.shape[0]: __lowerCamelCase : Any =None else: __lowerCamelCase : Union[str, Any] =timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCamelCase : List[str] =scheduler.step( __lowercase , __lowercase , __lowercase , prev_timestep=__lowercase , generator=__lowercase ).prev_sample __lowerCamelCase : int =pred_prev_sample __lowerCamelCase : List[Any] =torch.sum(torch.abs(__lowercase ) ) __lowerCamelCase : List[str] =torch.mean(torch.abs(__lowercase ) ) assert abs(result_sum.item() - 258.2044983 ) < 1e-2 assert abs(result_mean.item() - 0.3362038 ) < 1e-3 def __lowercase ( self :List[str] ): pass def __lowercase ( self :Tuple ): pass
363
1
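The SEW config in the record above exposes the feature extractor's total stride as the product of the per-layer conv strides (the functools.reduce in the final property). A tiny illustration using the defaults from that config:

import functools
import operator

# Default conv strides from the SEW config above.
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)

# The convolutional front end emits one frame per this many waveform samples.
print(functools.reduce(operator.mul, conv_stride, 1))  # 320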
'''simple docstring''' from __future__ import annotations from random import random class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase = None ) -> int: lowerCAmelCase__ : str = value lowerCAmelCase__ : str = random() lowerCAmelCase__ : Node | None = None lowerCAmelCase__ : Node | None = None def __repr__( self ) -> str: from pprint import pformat if self.left is None and self.right is None: return F"""'{self.value}: {self.prior:.5}'""" else: return pformat( {F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 ) def __str__( self ) -> str: lowerCAmelCase__ : Any = str(self.value ) + """ """ lowerCAmelCase__ : str = str(self.left or """""" ) lowerCAmelCase__ : Any = str(self.right or """""" ) return value + left + right def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: lowerCAmelCase__ , lowerCAmelCase__ : Dict = split(root.left , UpperCamelCase ) return left, root else: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = split(root.right , UpperCamelCase ) return root, right def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: lowerCAmelCase__ : Optional[int] = merge(left.right , UpperCamelCase ) return left else: lowerCAmelCase__ : Tuple = merge(UpperCamelCase , right.left ) return right def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = Node(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = split(UpperCamelCase , UpperCamelCase ) return merge(merge(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = split(UpperCamelCase , value - 1 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = split(UpperCamelCase , UpperCamelCase ) return merge(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not root: # None return else: inorder(root.left ) print(root.value , end=""",""" ) inorder(root.right ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" for arg in args.split(): if arg[0] == "+": lowerCAmelCase__ : Dict = insert(UpperCamelCase , int(arg[1:] ) ) elif arg[0] == "-": lowerCAmelCase__ : Optional[int] = erase(UpperCamelCase , int(arg[1:] ) ) else: print("""Unknown command""" ) return root def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Tuple = None print( """enter numbers to create a tree, + value to add value into treap, """ """- value to erase all nodes with value. 'q' to quit. """ ) lowerCAmelCase__ : List[str] = input() while args != "q": lowerCAmelCase__ : str = interact_treap(UpperCamelCase , UpperCamelCase ) print(UpperCamelCase ) lowerCAmelCase__ : Tuple = input() print("""good by!""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
565
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return (base ** exponent) % modulo_value using binary exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the hyperexponentiation
    of `base` by `height` (i.e. base tetrated `height` times)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
565
1
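As a quick sanity check of the treap above, a short usage sketch; the expected output follows from the in-order (sorted) traversal that `inorder` prints:

# assumes Node / insert / erase / inorder from the treap module above
root = None
for value in [5, 3, 8, 1, 4]:
    root = insert(root, value)
inorder(root)  # prints: 1,3,4,5,8,
print()
root = erase(root, 3)
inorder(root)  # prints: 1,4,5,8,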
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2StructImageProcessor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
286
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)

    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
286
1
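A hedged usage sketch for the Pix2Struct processor above; the checkpoint name is an assumption (any Pix2Struct checkpoint with a non-VQA image processor behaves the same way), and the blank image is only a placeholder:

from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(images=image, text="a caption", return_tensors="pt")
# for a non-VQA checkpoint, __call__ above renames the tokenized text to
# decoder_input_ids / decoder_attention_mask and merges it with the pixel features
print(inputs.keys())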
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper positive divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
233
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(UpperCamelCase ): __UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = FlaxAutoModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(UpperCamelCase ): __UpperCAmelCase : str = AutoConfig.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: __UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase : Any ): return model(**UpperCamelCase ) eval(**UpperCamelCase ).block_until_ready() @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = FlaxRobertaModel.from_pretrained(UpperCamelCase ) __UpperCAmelCase : str = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase : Tuple ): return model(**UpperCamelCase ) eval(**UpperCamelCase ).block_until_ready() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase , """bert-base is not a local folder and is not a valid model identifier""" ): __UpperCAmelCase : Tuple = FlaxAutoModel.from_pretrained("""bert-base""" ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): __UpperCAmelCase : str = FlaxAutoModel.from_pretrained(UpperCamelCase , revision="""aaaaaa""" ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ): __UpperCAmelCase : str = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' with 
self.assertRaisesRegex(UpperCamelCase , """Use `from_pt=True` to load this model""" ): __UpperCAmelCase : Optional[Any] = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
139
0
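A quick worked check of `perfect` above: 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, while the proper divisors of 12 sum to 16:

assert perfect(6)        # 1 + 2 + 3 == 6
assert perfect(28)       # 1 + 2 + 4 + 7 + 14 == 28
assert not perfect(12)   # 1 + 2 + 3 + 4 + 6 == 16 != 12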
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        #  In 1991 , the remains of Russian Tsar Nicholas II and his family
        #  ( except for Alexei and Maria ) are discovered .
        #  The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        #  remainder of the story . 1883 Western Siberia ,
        #  a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        #  Rasputin has a vision and denounces one of the men as a horse thief . Although his
        #  father initially slaps him for making such an accusation , Rasputin watches as the
        #  man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        #  the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        #  with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        #  In 1991, the remains of Russian Tsar Nicholas II and his family (
        #  except for Alexei and Maria ) are discovered. The voice of young son,
        #  Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        #  1883 Western Siberia, a young Grigori Rasputin is asked by his father
        #  and a group of men to perform magic. Rasputin has a vision and
        #  denounces one of the men as a horse thief. Although his father initially
        #  slaps him for making such an accusation, Rasputin watches as the man
        #  is chased outside and beaten. Twenty years later, Rasputin sees a vision
        #  of the Virgin Mary, prompting him to become a priest.
        #  Rasputin quickly becomes famous, with people, even a bishop, begging for
        #  his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        #  Nicholas II and his family were discovered. The voice of <unk> young son,
        #  Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
701
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
548
0
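A minimal sketch of the DatasetInfo round trip the tests above exercise; the directory path is arbitrary:

from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
info.write_to_directory("/tmp/dset_info_demo")  # writes dataset_info.json
reloaded = DatasetInfo.from_directory("/tmp/dset_info_demo")
assert reloaded.dataset_size == 42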
from __future__ import annotations


def all_unique(sequence: list) -> bool:
    """
    Return True if every element of `sequence` occurs exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique(["a", "b", "a"])
    False
    """
    return len(set(sequence)) == len(sequence)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
30
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
27
0
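A hedged sketch of the masked-LM inference the ESM integration test above checks, using the same checkpoint and toy input (requires TensorFlow and network access to download the weights):

import tensorflow as tf
from transformers import TFEsmForMaskedLM

model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids).logits
print(logits.shape)  # (1, 6, 33): one row of logits per token over the 33-symbol vocabulary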
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes up to `n`."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # sieve the first segment [2, sqrt(n)] with a classic sieve of Eratosthenes
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range in segments of size sqrt(n), marking
    # multiples of the primes found in the first segment
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
709
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """
    Count the ways `needed_sum` can be written as a sum of distinct natural
    numbers each raised to `power`.

    >>> solve(13, 2)
    1
    >>> solve(100, 2)
    3
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
143
0
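A worked check of `solve` above: 100 is expressible as a sum of distinct squares in exactly three ways, and 13 in exactly one:

# 100 = 10**2 = 6**2 + 8**2 = 1**2 + 3**2 + 4**2 + 5**2 + 7**2
assert solve(100, 2) == 3
# 13 = 2**2 + 3**2 is the only decomposition
assert solve(13, 2) == 1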