"""Speech processor class for Wav2Vec2."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor`, forward the call
        # to whichever processor (tokenizer or feature extractor) is currently active.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
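A minimal usage sketch for the processor above; the checkpoint name is only an example of a public Wav2Vec2 model, and network access is assumed:

# Usage sketch (assumption: `transformers` is installed and a 16 kHz waveform is available;
# "facebook/wav2vec2-base-960h" is one example checkpoint, not the only option).
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
print(batch["input_values"].shape)  # acoustic features for the model
print(batch["labels"])              # tokenized transcription attached as labels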
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""Convert SwiftFormer checkpoints from the original implementation."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
def capitalize_single_letters(text: str) -> list:
    """
    Return every variant of `text` with exactly one alphabetic character uppercased.

    >>> capitalize_single_letters("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        text[:index] + text[index].upper() + text[index + 1 :]
        for index in range(len(text))
        if text[index].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE = LxmertConfig.from_json_file(UpperCAmelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__SCREAMING_SNAKE_CASE = LxmertForPreTraining(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase__ =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class A__:
def __init__( self : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : float ) -> None:
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = probability
def _a ( self : Optional[int] ) -> list[str]:
"""simple docstring"""
return list(self.connections )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, int]:
__SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() )
__SCREAMING_SNAKE_CASE = start
for _ in range(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = graph.transition(UpperCAmelCase__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
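A small usage sketch of `get_transitions`; the transition table is made up for illustration:

# Usage sketch (hypothetical two-state weather chain, run for 1000 steps).
transitions = [
    ("sunny", "sunny", 0.9),
    ("sunny", "rainy", 0.1),
    ("rainy", "sunny", 0.5),
    ("rainy", "rainy", 0.5),
]
visit_counts = get_transitions("sunny", transitions, 1000)
print(visit_counts)  # roughly 5:1 in favor of 'sunny'; exact counts vary per run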
"""Convert Audio Spectrogram Transformer checkpoints from the original repository."""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    # Split fused qkv projections into separate query/key/value weights; the
    # destination key names follow the HF AST module layout.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """
    Bezier curve: a weighted sum of the control points, where the weights are
    the Bernstein basis polynomials.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
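A minimal sketch of driving the pipeline end to end; "google/ncsnpp-celebahq-256" is an example score-sde-ve checkpoint on the Hub, and sampling is slow on CPU:

# Usage sketch (assumption: `diffusers` installed and a score-sde-ve checkpoint available).
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=2000).images[0]  # 2000 steps is the scheduler default here
image.save("sde_ve_sample.png")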
"""Tokenization class for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1_024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for Speech2Text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
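A short usage sketch; "facebook/s2t-small-librispeech-asr" is the checkpoint already referenced in the file's vocab maps, and the exact ids depend on its vocabulary:

# Usage sketch (assumption: `sentencepiece` installed and network access available).
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
print(ids)  # prefix tokens + subword ids + eos
print(tokenizer.decode(ids, skip_special_tokens=True))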
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
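A quick sketch of how the config feeds a model; `Swin2SRForImageSuperResolution` is the matching model class in `transformers`:

# Usage sketch: randomly initialized 4x super-resolution model from a tweaked config.
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution

config = Swin2SRConfig(upscale=4)  # override the default 2x upscale factor
model = Swin2SRForImageSuperResolution(config)
print(model.config.upscale)        # 4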
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # GIT sets `model_inputs["input_ids"] = None` in `preprocess` when there is no
        # prompt; in batch mode these become a list of `None`, which `generate` rejects,
        # so collapse that list back to a single `None` first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
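A minimal sketch of calling this pipeline through the high-level `pipeline` factory; the checkpoint name is one example captioning model:

# Usage sketch (assumption: network access; "ydshieh/vit-gpt2-coco-en" is an example
# image-captioning checkpoint — any vision-to-text model works).
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# -> [{'generated_text': '...'}]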
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_snake_case = datasets.utils.logging.get_logger(__name__)
_snake_case = ['names', 'prefix']
_snake_case = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_snake_case = ['encoding_errors', 'on_bad_lines']
_snake_case = ['date_format']
@dataclass
class a__ ( datasets.BuilderConfig ):
_SCREAMING_SNAKE_CASE : str = ","
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : Optional[Union[int, List[int], str]] = "infer"
_SCREAMING_SNAKE_CASE : Optional[List[str]] = None
_SCREAMING_SNAKE_CASE : Optional[List[str]] = None
_SCREAMING_SNAKE_CASE : Optional[Union[int, str, List[int], List[str]]] = None
_SCREAMING_SNAKE_CASE : Optional[Union[List[int], List[str]]] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : Optional[Literal["c", "python", "pyarrow"]] = None
_SCREAMING_SNAKE_CASE : Dict[Union[int, str], Callable[[Any], Any]] = None
_SCREAMING_SNAKE_CASE : Optional[list] = None
_SCREAMING_SNAKE_CASE : Optional[list] = None
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : Optional[Union[int, List[int]]] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : str = "."
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : str = '"'
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : bool = False
_SCREAMING_SNAKE_CASE : Optional[str] = None
_SCREAMING_SNAKE_CASE : int = 1_0000
_SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
_SCREAMING_SNAKE_CASE : Optional[str] = "strict"
_SCREAMING_SNAKE_CASE : Literal["error", "warn", "skip"] = "error"
_SCREAMING_SNAKE_CASE : Optional[str] = None
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.delimiter is not None:
_lowercase : Optional[Any] = self.delimiter
if self.column_names is not None:
_lowercase : str = self.column_names
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _UpperCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class a__ ( datasets.ArrowBasedBuilder ):
_SCREAMING_SNAKE_CASE : int = CsvConfig
def _lowerCamelCase ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_lowercase : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCamelCase , (str, list, tuple) ):
_lowercase : List[Any] = data_files
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowercase : Union[str, Any] = [files]
_lowercase : int = [dl_manager.iter_files(_UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_lowercase : List[Any] = []
for split_name, files in data_files.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowercase : Tuple = [files]
_lowercase : Tuple = [dl_manager.iter_files(_UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCamelCase , gen_kwargs={"files": files} ) )
return splits
    def _cast_table( self , pa_table ):
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
| 245 | 0 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
class DeeBertTests( TestCasePlus ):
    '''simple docstring'''
    def setup( self ):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        """simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(sys , """argv""" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        """simple docstring"""
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args )
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args )
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args ) | 178 |
'''simple docstring'''
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 178 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure( config ):
config.addinivalue_line(
"markers" ,"is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" ,"is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" ,"is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" ,"is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" ,"accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" ,"tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 372 |
'''simple docstring'''
import base64
def base85_encode( string: str ) -> bytes:
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode( a85encoded: bytes ) -> str:
    return base64.a85decode(a85encoded ).decode("utf-8" )
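# Round-trip sketch (function names as defined above): decoding an encoded
# string returns the original text.
# assert base85_decode(base85_encode("some text")) == "some text"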
if __name__ == "__main__":
import doctest
doctest.testmod()
| 372 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 112 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    """simple docstring"""
    def __init__( self ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits" , self.watermark )
    def apply_watermark( self , images ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , "dwtDct" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
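# Minimal usage sketch (assumes `images` is a batch of tensors in [-1, 1] with
# shape (N, 3, H, W) and H, W >= 256; smaller images are returned unchanged):
# watermarker = StableDiffusionXLWatermarker()
# watermarked = watermarker.apply_watermark(images)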
| 112 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type="""hybrid""" )
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    if "backbone" in name:
        name = name.replace("""backbone""" , """backbone.bit.encoder""" )
    if ".." in name:
        name = name.replace("""..""" , """.""" )
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("""convolution""" , """conv""" )
    if "layer" in name and "backbone" in name:
        name = name.replace("""layer""" , """layers""" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
    if "embedder.conv" in name:
        name = name.replace("""embedder.conv""" , """embedder.convolution""" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
    return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 180 |
"""simple docstring"""
from __future__ import annotations
def binary_search( a_list , item ):
    # assumes a_list is sorted in ascending order
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
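# Examples: binary_search([1, 3, 5, 7, 9], 5) returns True;
# binary_search([1, 3, 5], 4) returns False.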
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'{target} was {not_str}found in {sequence}')
| 180 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_upernet"""] = [
        """UperNetForSemanticSegmentation""",
        """UperNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 436 |
import math
def check_partition_perfect( positive_integer: int ) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion: float = 1 / 12345 ) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
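# Worked example: for integer = 3, partition_candidate = (9 - 1) / 4 = 2, and
# check_partition_perfect(2) is True since log2(sqrt(4*2 + 1) / 2 + 1 / 2) = log2(2) = 1 is an integer.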
if __name__ == "__main__":
print(F'''{solution() = }''') | 436 | 1 |
def and_gate( input_1: int , input_2: int ) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0 ) == 0 )
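# How the trick works: (1, 1).count(0) == 0 is True, and int(True) == 1,
# while (1, 0).count(0) == 1 makes the comparison False, giving 0.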
def test_and_gate() -> None:
    '''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 85 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """masked_bert"""
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,pad_token_id=0 ,pruning_method="topK" ,mask_init="constant" ,mask_scale=0.0 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
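# Minimal usage sketch (values shown are the defaults from the signature above):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)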
| 116 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        '''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_controlnet_img2img( self ):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = '''A robot, 4k photo'''
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT :
    def __init__( self , poly_a=None , poly_b=None ):
        '''simple docstring'''
        # Input as list
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft( self , which ):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply( self ):
        '''simple docstring'''
        dft_a = self.__dft('''A''' )
        dft_b = self.__dft('''B''' )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self ):
        '''simple docstring'''
        a = '''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = '''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = '''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return F'''{a}\n{b}\n{c}'''
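# Minimal usage sketch (coefficient lists are hypothetical):
# x = FFT(poly_a=[1, 2, 3], poly_b=[4, 5, 6])
# print(x.product)  # coefficients of (1 + 2x + 3x^2) * (4 + 5x + 6x^2)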
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision: int ) -> str:
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
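# Example: pi(10) is expected to return '3.14159265' (ten significant digits
# are computed, with the last digit dropped as a guard digit).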
if __name__ == "__main__":
    n = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class VersatileDiffusionMegaPipelineFastTests( unittest.TestCase ):
    """simple docstring"""
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """cyberpunk 2077"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image , generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 45 | 1 |
def combination_util( arr , n , r , index , data , i ):
    '''simple docstring'''
    if index == r:
        for j in range(r ):
            print(data[j] , end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ):
    '''simple docstring'''
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
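# Expected behavior: for arr = [10, 20, 30, 40, 50] and r = 3, the driver below
# prints all C(5, 3) = 10 combinations, from "10 20 30" through "30 40 50".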
if __name__ == "__main__":
# Driver code to check the function above
lowerCAmelCase : int =[10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 693 |
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled():
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(OfflineModeIsEnabled ):
http_head("""https://huggingface.co""" )
| 693 | 1 |
from math import sqrt
def sum_of_divisors( n: int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit: int = 10_000 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
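# Example: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so 220
# and 284 form an amicable pair and both are counted by solution().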
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 257 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu :
    '''simple docstring'''
    def __init__( self , prompt = None , choices = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '''*'''
        else:
            self.arrow_char = '''➔ '''
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase_ )
else:
forceWrite(self.choices[index] , UpperCamelCase_ )
    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
self.print_choice(self.position )
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
else:
return
else:
return
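    # run() below is the public entry point: it draws every choice once, then loops
    # on keyboard input (or plain stdin when running in Colab) until a selection is
    # confirmed, and finally rewrites the chosen line before returning its index.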
    def run(self, default_choice: int = 0):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
                    self.write_choice(choice, "\n")
return choice
| 257 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
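# Lazy-import pattern used throughout the library: _import_structure maps submodule
# names to the symbols they export, and the _LazyModule installed at the bottom of
# this file resolves them only on first attribute access.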
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 | import qiskit
def half_adder(bit0: int, bit1: int):
    """Build and simulate a quantum half adder for the two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
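    # Resulting truth table: classical bit 0 holds sum = bit0 XOR bit1 and classical
    # bit 1 holds carry = bit0 AND bit1, so half_adder(1, 1) should peak at the
    # bitstring '10' (carry=1, sum=0).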
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'Half Adder Output Qubit Counts: {counts}')
| 452 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 4 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
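# The three mappings above follow the usual transformers convention: checkpoint
# names resolve to vocab file URLs, maximum input lengths, and per-checkpoint
# initialization kwargs respectively.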
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 237 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    # YOLOS does not use inputs_embeds
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
@slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 715 | from __future__ import annotations
graph: dict[str, list[str]] = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
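# breadth_first_search() fills Graph.parent with a BFS tree rooted at the source
# vertex; following parent pointers back to the source (as shortest_path() does)
# therefore traces a minimum-hop path in this unweighted graph.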
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 246 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
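# Key-generation math, for reference: with prime p and primitive root e_1, the
# private exponent is d, and the published value e_2 is the modular inverse of
# e_1^d mod p; recovering d from the public values is the discrete-log problem.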
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
f'''\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' ,'''w''' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' ,'''w''' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 683 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
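# The Image stub above only exists so module-level references resolve when Pillow
# is not installed; the vision-dependent tests themselves are skipped in that case.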
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # The tiny model gives near-uniform scores, so the label order is unstable;
            # only the structure and score values are checked here.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        ) | 396 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
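# Each outer iteration makes one right-to-left pass (sinking small values toward the
# front) and one left-to-right pass (bubbling large values toward the back); the
# early exit fires when a full double pass performs no swap, so sorted input is O(n).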
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }") | 396 | 1 |
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
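# Both tables are keyed through km/h: speed_chart converts a unit into km/h and
# speed_chart_inverse converts km/h back out, so any unit pair composes as
# value * speed_chart[unit_from] * speed_chart_inverse[unit_to]. For example,
# convert_speed(100, "km/h", "mph") comes out to 62.137.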
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
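# Each list above is a small "script" of dict operations. The parametrized test
# below replays a script against both HashMap and a plain dict, asserting after
# every step that the two stay observably identical (differential testing).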
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_matching_python_dict():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names | 504 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 316 | from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of the array."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
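# The recursion branches on skipping or keeping the current pivot, so worst-case
# cost is exponential; e.g. for [10, 22, 9, 33, 21, 50, 41, 60, 80] it returns one
# longest non-decreasing subsequence of length 6.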
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 1 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
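# All four variants in this file share the same recurrence: a cell containing 1
# extends a square whose side is 1 + min(right, diagonal, down); they differ only in
# how subproblems are cached (plain recursion, memo table, full DP table, two rows).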
def largest_square_area_in_matrix_top_down_with_memo(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row

    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 666 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 447 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
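# How this pipeline works: each input sequence is treated as an NLI premise and
# every candidate label is slotted into hypothesis_template to form a hypothesis;
# the model's entailment score for each pair then serves as that label's score.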
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a model trained on NLI (natural language inference) tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
@property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
def __call__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
if len(lowerCamelCase_ ) == 0:
pass
elif len(lowerCamelCase_ ) == 1 and "candidate_labels" not in kwargs:
lowercase_ = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase="This example is {}." ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self._args_parser(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ):
lowercase_ = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase_ ) - 1,
**model_input,
}
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = inputs["candidate_label"]
lowercase_ = inputs["sequence"]
lowercase_ = {k: inputs[k] for k in self.tokenizer.model_input_names}
lowercase_ = self.model(**lowerCamelCase_ )
lowercase_ = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase=False ) -> Any:
'''simple docstring'''
lowercase_ = [outputs["candidate_label"] for outputs in model_outputs]
lowercase_ = [outputs["sequence"] for outputs in model_outputs]
lowercase_ = np.concatenate([output["logits"].numpy() for output in model_outputs] )
lowercase_ = logits.shape[0]
lowercase_ = len(lowerCamelCase_ )
lowercase_ = N // n
lowercase_ = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
lowercase_ = self.entailment_id
lowercase_ = -1 if entailment_id == 0 else 0
lowercase_ = reshaped_outputs[..., [contradiction_id, entailment_id]]
lowercase_ = np.exp(lowerCamelCase_ ) / np.exp(lowerCamelCase_ ).sum(-1 , keepdims=lowerCamelCase_ )
lowercase_ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
lowercase_ = reshaped_outputs[..., self.entailment_id]
lowercase_ = np.exp(lowerCamelCase_ ) / np.exp(lowerCamelCase_ ).sum(-1 , keepdims=lowerCamelCase_ )
lowercase_ = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
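# A minimal usage sketch for the pipeline above, assuming the high-level
# `pipeline` factory and an NLI checkpoint such as "facebook/bart-large-mnli"
# (the checkpoint name is an illustrative assumption, not part of this module):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#     )
#     print(result["labels"][0], result["scores"][0])  # best-scoring label first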
| 710 |
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
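# A hedged example invocation, using only the flags defined by the argparse
# setup above; the script name and every path are placeholders:
#
#     python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./sew.pt \
#         --dict_path ./dict.ltr.txt \
#         --pytorch_dump_folder_path ./sew-hf \
#         --is_finetuned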
| 601 | 0 |
"""
Preprocessing script before training the distilled model.
"""
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
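# Example invocation (the values shown are simply the argparse defaults above;
# the script name is a placeholder):
#
#     python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#         --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522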
| 107 |
"""OpenAI ImageGPT configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
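# A small sketch of how the config above is typically used; `hidden_size`
# resolves to `n_embd` through `attribute_map`, so both spellings name one value:
#
#     config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
#     assert config.hidden_size == 512  # alias via attribute_map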
| 207 | 0 |
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for item, keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for item, keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
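# A quick non-interactive sketch of the helpers above, doctest style:
#
#     >>> data = [0, 5, 7, 10, 15]
#     >>> binary_search(data, 15)
#     4
#     >>> bisect_left(data, 6)
#     2
#     >>> insort_left(data, 6)
#     >>> data
#     [0, 5, 6, 7, 10, 15]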
| 704 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
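# With the lazy-module setup above, consumers import symbols from the package as
# usual and the heavy submodules only load on first attribute access, e.g.:
#
#     from transformers import PerceiverConfig, PerceiverModel  # resolved lazily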
| 252 | 0 |
"""
Approximate the arc length of the curve y = fnc(x) between two points by summing
the lengths of short straight-line segments.
"""
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    A straight line y = x from 0 to 1 has length sqrt(2):

    >>> def f(x):
    ...     return x
    >>> f"{line_length(f, 0, 1, 10):.6f}"
    '1.414214'
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 436 |
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
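# These tests are meant to be driven by pytest from the repository root; the
# path below follows the usual tests/models/<name>/ layout and is an assumption:
#
#     pytest tests/models/convnextv2/test_modeling_convnextv2.py -k "test_model"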
| 204 | 0 |
from string import ascii_uppercase

LETTER_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key cyclically until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key; spaces pass through unchanged."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (LETTER_TO_INDEX[letter] - LETTER_TO_INDEX[key_new[i]]) % 26
            i += 1
            encrypted += INDEX_TO_LETTER[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (LETTER_TO_INDEX[letter] + LETTER_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_LETTER[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
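# A round-trip sanity check for the cipher above (no fixed ciphertext is
# asserted, only that decryption inverts encryption):
#
#     >>> key = generate_key("THE GERMAN ATTACK", "SECRET")
#     >>> original_text(cipher_text("THE GERMAN ATTACK", key), key)
#     'THE GERMAN ATTACK'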
| 675 |
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, *args
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
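# The slow integration tests above hit the "microsoft/biogpt" checkpoint; in a
# transformers checkout they are usually enabled explicitly (path assumed):
#
#     RUN_SLOW=1 pytest tests/models/biogpt/test_modeling_biogpt.py -k "generation"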
| 675 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase_ ( _lowerCamelCase : int , _lowerCamelCase : int ):
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def lowerCamelCase_ ( _lowerCamelCase : int ):
lowerCamelCase_ = []
lowerCamelCase_ = 1_1
lowerCamelCase_ = int('''1''' + '''0''' * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 1_0
return solutions
def lowerCamelCase_ ( _lowerCamelCase : int = 2 ):
lowerCamelCase_ = 1.0
for fraction in fraction_list(_lowerCamelCase ):
lowerCamelCase_ = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution()) | 142 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Dict:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> List[str]:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> List[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> List[str]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> List[str]:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ) -> Tuple:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class | 142 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'''
_DESCRIPTION = '''\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'''
_KWARGS_DESCRIPTION = '''\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'''
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
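# Quick sanity check for the helper functions above (illustrative only; the
# numbers are approximate):
#     >>> import numpy as np
#     >>> acc_and_f1(np.array([0, 1, 1]), np.array([0, 1, 0]))
#     {'accuracy': 0.666..., 'f1': 0.666...}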
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 718 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('''_T''')
class QueueByTwoStacks(Generic[_T] ):
"""simple docstring"""
    def __init__( self , iterable = None ):
        """simple docstring"""
        self._stacka: list[_T] = list(iterable or [] )
        self._stackb: list[_T] = []
    def __len__( self ):
        """simple docstring"""
        return len(self._stacka ) + len(self._stackb )
    def __repr__( self ):
        """simple docstring"""
        return f'''Queue({tuple(self._stackb[::-1] + self._stacka )})'''
    def put( self , item ):
        """simple docstring"""
        self._stacka.append(item )
    def get( self ):
        """simple docstring"""
        stacka_pop = self._stacka.pop
        stackb_append = self._stackb.append
        if not self._stackb:
            while self._stacka:
                stackb_append(stacka_pop() )
        if not self._stackb:
            raise IndexError("""Queue is empty""" )
        return self._stackb.pop()
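# A minimal usage sketch for the two-stack queue above (illustrative only):
#     >>> q = QueueByTwoStacks([1, 2, 3])
#     >>> q.put(4)
#     >>> q.get()   # dequeues from the front in amortized O(1)
#     1
#     >>> len(q)
#     3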
if __name__ == "__main__":
from doctest import testmod
testmod()
| 519 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    '''simple docstring'''
    def put(self , value ):
        """simple docstring"""
        raise NotImplementedError()
    def end(self ):
        """simple docstring"""
        raise NotImplementedError()
class TextStreamer(BaseStreamer ):
    '''simple docstring'''
    def __init__(self , tokenizer , skip_prompt = False , **decode_kwargs ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self , value ):
        """simple docstring"""
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end(self ):
        """simple docstring"""
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text(self , text , stream_end = False ):
        """simple docstring"""
        print(text , flush=True , end='''''' if not stream_end else None )
    def _is_chinese_char(self , cp ):
        """simple docstring"""
        if (
            (cp >= 0x4_E00 and cp <= 0x9_FFF)
            or (cp >= 0x3_400 and cp <= 0x4_DBF)  #
            or (cp >= 0x20_000 and cp <= 0x2A_6DF)  #
            or (cp >= 0x2A_700 and cp <= 0x2B_73F)  #
            or (cp >= 0x2B_740 and cp <= 0x2B_81F)  #
            or (cp >= 0x2B_820 and cp <= 0x2C_EAF)  #
            or (cp >= 0xF_900 and cp <= 0xF_AFF)
            or (cp >= 0x2F_800 and cp <= 0x2F_A1F)  #
        ):  #
            return True
        return False
class TextIteratorStreamer(TextStreamer ):
    '''simple docstring'''
    def __init__(self , tokenizer , skip_prompt = False , timeout = None , **decode_kwargs ):
        """simple docstring"""
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self , text , stream_end = False ):
        """simple docstring"""
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__(self ):
        """simple docstring"""
        return self
    def __next__(self ):
        """simple docstring"""
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
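# A minimal usage sketch for TextIteratorStreamer (illustrative; assumes a small
# causal LM checkpoint such as "gpt2" is available locally or via the Hub):
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tok)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#     thread.start()
#     generated_text = "".join(chunk for chunk in streamer)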
| 11 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class GetTestInfoTester(unittest.TestCase ):
    '''simple docstring'''
    def test_get_test_to_tester_mapping( self ) -> Dict:
        """simple docstring"""
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {"""BertModelTest""": """BertModelTester"""}
        expected_blip_mapping = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
    def test_get_model_to_test_mapping( self ) -> str:
        """simple docstring"""
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file )
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
    def test_get_model_to_tester_mapping( self ) -> Any:
        """simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping ) | 108 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 705 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline(Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''decord''' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        """simple docstring"""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos , **kwargs ):
        """simple docstring"""
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        """simple docstring"""
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
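# A minimal usage sketch (illustrative; requires the `decord` backend and a
# default video-classification checkpoint to be downloadable):
#     from transformers import pipeline
#     classifier = pipeline("video-classification")
#     classifier("path/to/video.mp4", top_k=3)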
| 336 | 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
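# Illustrative run on the classic textbook instance (capacity 50):
#     >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     (240.0, [1, 1, 0.6666666666666666])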
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 | """simple docstring"""
from __future__ import annotations
def bucket_sort( my_list ):
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
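# Runs in O(n + k) on average, where k is the value range, since each bucket
# only holds nearby values; the per-bucket sorted() call bounds the worst case.
#     >>> bucket_sort([0.4, 0.2, 0.9])
#     [0.2, 0.4, 0.9]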
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 363 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
LABEL_DIR = """"""
IMAGE_DIR = """"""
OUTPUT_DIR = """"""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    img_paths , annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print("Processing..." )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cva.imwrite(F'/{file_root}.jpg' , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'Success {index+1}/{len(new_images )} with {file_name}' )
        annos_list = []
        for anno in new_annos[index]:
            obj = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj )
        with open(F'/{file_root}.txt' , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def get_dataset( label_dir : str , img_dir : str ):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list : list , anno_list : list , flip_type : int = 1 ):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars( number_char : int = 32 ):
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 493 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator( iterations : int ):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x : float , y : float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The numpy value of pi is {pi}' )
    print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator( iterations : int , function_to_integrate : Callable[[float], float] , min_value : float = 0.0 , max_value : float = 1.0 , ):
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check( iterations : int , min_value : float = 0.0 , max_value : float = 1.0 ):
    def identity_function(x : float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def pi_estimator_using_area_under_curve( iterations : int ):
    def function_to_integrate(x : float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 493 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name ):
    iam_client = boto3.client("""iam""" )
    sagemaker_trust_policy = {
        """Version""": """2012-10-17""",
        """Statement""": [
            {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn(role_name ):
    iam_client = boto3.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , int , )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
        os.environ["""AWS_PROFILE"""] = aws_profile
    else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
        aws_access_key_id = _ask_field("""AWS Access Key ID: """ )
        os.environ["""AWS_ACCESS_KEY_ID"""] = aws_access_key_id
        aws_secret_access_key = _ask_field("""AWS Secret Access Key: """ )
        os.environ["""AWS_SECRET_ACCESS_KEY"""] = aws_secret_access_key
    aws_region = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
    os.environ["""AWS_DEFAULT_REGION"""] = aws_region
    role_management = _ask_options(
        """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("""Enter your IAM role name: """ )
    else:
        iam_role_name = """accelerate_sagemaker_execution_role"""
        print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("""Enter your Docker image: """ , lambda x: str(x ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda x: str(x ).lower() , )
    is_sagemaker_metrics_enabled = _ask_field(
        """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda x: str(x ).lower() , )
    distributed_type = _ask_options(
        """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
    dynamo_config = {}
    use_dynamo = _ask_field(
        """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    if use_dynamo:
        prefix = """dynamo_"""
        dynamo_config[prefix + """backend"""] = _ask_options(
            """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options = _ask_field(
            """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
        if use_custom_options:
            dynamo_config[prefix + """mode"""] = _ask_options(
                """Which mode do you want to use?""" , TORCH_DYNAMO_MODES , lambda x: TORCH_DYNAMO_MODES[int(x )] , default="""default""" , )
            dynamo_config[prefix + """use_fullgraph"""] = _ask_field(
                """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
            dynamo_config[prefix + """use_dynamic"""] = _ask_field(
                """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    ec2_instance_query = """Which EC2 instance type you want to use for your training?"""
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda x: str(x ).lower() , default="""ml.p3.2xlarge""" )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            """How many machines do you want use? [1]: """ , int , default=1 , )
    mixed_precision = _ask_options(
        """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
    return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
| 264 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline ):
    def __init__(self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
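# A minimal usage sketch (illustrative; assumes a compatible checkpoint such as
# "google/ddpm-cifar10-32" can be downloaded):
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")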
| 264 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
a = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class FNetTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''token_type_ids''']
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
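    # For reference, the layout produced by build_inputs_with_special_tokens:
    #     single sequence:   [CLS] X [SEP]
    #     pair of sequences: [CLS] A [SEP] B [SEP]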
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 505 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
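# A minimal round-trip sketch (illustrative; requires the `sentencepiece`
# package and the "albert-base-v2" checkpoint to be downloadable):
#     tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#     ids = tok("Hello world")["input_ids"]
#     tok.decode(ids)  # roughly '[CLS] hello world [SEP]'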
| 505 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
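# The lazy-module pattern above defers the heavy torch/tf/flax imports until an
# attribute is first accessed, e.g. (illustrative):
#     from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
# only materializes the configuration submodule at that point.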
| 439 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False)
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        tokens = tokenizer.tokenize("""你好[SEP]你是谁""")
        self.assertListEqual(tokens , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens) , [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
    def test_basic_tokenizer_lower(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_no_lower(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
'''simple docstring'''
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""])
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
    def test_wordpiece_tokenizer(self):
        '''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
    def test_is_whitespace(self):
        '''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
    def test_is_control(self):
        '''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
    def test_is_punctuation(self):
        '''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
    def test_clean_text(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
    def test_offsets_with_special_characters(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
    def test_change_tokenize_chinese_chars(self):
        '''simple docstring'''
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char)
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens)
    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        text = tokenizer.encode("""你好""" , add_special_tokens=False)
        text_a = tokenizer.encode("""你是谁""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                string_sequence = """你好,你是谁"""
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True)
                self.assertEqual(input_dict , prepared_input_dict)
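# A minimal sketch (not the library implementation) of the greedy longest-match-first
# WordPiece algorithm that the RoCBertWordpieceTokenizer test above exercises: scan each
# word left to right, take the longest vocab prefix, and mark continuations with "##".
# The toy vocab mirrors the one built in the wordpiece test; names here are illustrative.
def toy_wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    output = []
    for word in text.split():
        start, sub_tokens = 0, []
        while start < len(word):
            end = len(word)
            cur = None
            while start < end:
                piece = word[start:end]
                if start > 0:
                    piece = "##" + piece  # continuation pieces are prefixed
                if piece in vocab:
                    cur = piece
                    break
                end -= 1
            if cur is None:  # no prefix of the remainder is in the vocab
                sub_tokens = [unk_token]
                break
            sub_tokens.append(cur)
            start = end
        output.extend(sub_tokens)
    return output

assert toy_wordpiece_tokenize(
    "unwanted running", {"un", "##want", "##ed", "runn", "##ing", "[UNK]"}
) == ["un", "##want", "##ed", "runn", "##ing"]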
| 12 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''')
    args = parser.parse_args()
    return args.f
def get_results(output_dir : str , split : str = "eval"):
    path = os.path.join(output_dir , f'{split}_results.json')
    if os.path.exists(path):
        with open(path , '''r''') as f:
            return json.load(f)
    raise ValueError(f'can\'t find {path}')
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
'''simple docstring'''
    def test_run_glue(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
    def test_run_summarization(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
    def test_run_t5_mlm(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self):
        '''simple docstring'''
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
            self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
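# The pattern used throughout the test class above, shown standalone: temporarily
# replacing sys.argv via patch.object so that a script's argparse-based main() can be
# driven from a test. `script_module` and `cli_args` are illustrative names, not part
# of the file above.
import sys as _sys
from unittest.mock import patch as _patch

def run_script_main(script_module, cli_args):
    # argparse reads sys.argv[1:], so keep a dummy program name at position 0
    testargs = ["prog"] + list(cli_args)
    with _patch.object(_sys, "argv", testargs):
        script_module.main()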
| 709 |
'''simple docstring'''
def infix_2_postfix(infix : str):
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8), '''Stack'''.center(print_width), '''Postfix'''.center(print_width), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''''''.join(stack)).ljust(print_width), (''''''.join(post_fix)).ljust(print_width), sep=''' | ''', )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ''' '''.center(8), (''''''.join(stack)).ljust(print_width), (''''''.join(post_fix)).ljust(print_width), sep=''' | ''', )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix : str):
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ''')'''  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on reversed Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
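# A compact sketch of the same shunting-yard conversion without the tabular printing,
# assuming single-character operands and the operator priorities defined above.
def to_postfix(infix: str) -> str:
    priority = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}
    stack, out = [], []
    for x in infix:
        if x.isalnum():
            out.append(x)
        elif x == "(":
            stack.append(x)
        elif x == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()  # discard the "("
        else:
            while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                out.append(stack.pop())
            stack.append(x)
    while stack:
        out.append(stack.pop())
    return "".join(out)

assert to_postfix("a+b*(c^d-e)") == "abcd^e-*+"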
| 350 | 0 |
'''simple docstring'''
import numpy as np
class Cell :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ):
"""simple docstring"""
return self.position == cell.position
    def showcell( self ):
"""simple docstring"""
print(self.position )
class Gridworld :
'''simple docstring'''
    def __init__( self , world_size=(5, 5) ):
        """simple docstring"""
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ):
"""simple docstring"""
print(self.w )
    def get_neigbours( self , cell ):
        """simple docstring"""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar( world : Gridworld , start : Cell , goal : Cell ):
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # expand the open cell with the smallest f = g + h
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2  # squared-distance heuristic
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
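# For larger grids, a binary heap avoids the O(n) argmin scan over the open list on
# every iteration. A minimal sketch of the same search with heapq, using a Manhattan
# heuristic instead of the squared distance above (names here are illustrative):
import heapq

def astar_heap(world, start, goal):
    counter = 0  # tie-breaker so the heap never has to compare Cell objects
    open_heap = [(0, counter, start)]
    seen = {start.position}
    while open_heap:
        _, _, current = heapq.heappop(open_heap)
        if current == goal:
            break
        for n in world.get_neigbours(current):
            if n.position in seen:
                continue
            seen.add(n.position)
            n.g = current.g + 1
            n.h = abs(goal.position[0] - n.position[0]) + abs(goal.position[1] - n.position[1])
            counter += 1
            heapq.heappush(open_heap, (n.g + n.h, counter, n))
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]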
| 394 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'blip_text_model'
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'blip_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'blip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , image_text_hidden_size=256 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
@classmethod
    def from_text_vision_configs( cls , text_config : BlipTextConfig , vision_config : BlipVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
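# A short usage sketch of the composition pattern above: the joint config can be built
# from already-instantiated sub-configs via the classmethod, and to_dict() serializes
# the nested configs back out. The keyword values below are arbitrary examples.
if __name__ == "__main__":
    text_cfg = BlipTextConfig(vocab_size=1000)
    vision_cfg = BlipVisionConfig(image_size=224)
    joint = BlipConfig.from_text_vision_configs(text_cfg , vision_cfg)
    print(joint.to_dict()["""text_config"""]["""vocab_size"""])  # 1000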
| 394 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq : Iterable[str] , size : int ) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty : str ) -> str:
    '''simple docstring'''
    dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''''''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key : str ) -> list[str]:
    '''simple docstring'''
    alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext : str , key : str ) -> str:
    '''simple docstring'''
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext : str , key : str ) -> str:
    '''simple docstring'''
    table = generate_table(key )
    plaintext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
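# A quick round-trip sketch for the two functions above, assuming the message contains
# no "J" (the 5x5 table merges I/J). prepare_input uppercases, drops non-letters, and
# inserts "X" padding, so decode returns the cleaned digraph form, not the raw string.
if __name__ == "__main__":
    key = "MONARCHY"
    secret = encode("meet me at the hamlet", key)
    assert decode(secret, key) == prepare_input("meet me at the hamlet")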
| 362 |
import argparse
import copy
def generate_neighbours( path : str ) -> dict:
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path : str , dict_of_neighbours : dict ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution : list , dict_of_neighbours : dict ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n )
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution : list , distance_of_first_solution : int , dict_of_neighbours : dict , iters : int , size : int ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ) -> None:
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
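# The neighborhood above is built by swapping every pair of interior cities of the tour
# and re-scoring the result. A minimal sketch of just that swap step, isolated from the
# file-based adjacency lists (function and variable names are illustrative):
def swap_neighbours(solution):
    # first and last entries are the fixed start/end city, so only swap the interior
    neighbours = []
    for i in range(1, len(solution) - 1):
        for j in range(i + 1, len(solution) - 1):
            candidate = solution[:]
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbours.append(candidate)
    return neighbours

assert len(swap_neighbours(["a", "b", "c", "d", "a"])) == 3  # 3 interior pairs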
| 362 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = 'wav2vec2'
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , adapter_attn_dim=None , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
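# The property above multiplies the conv strides together; with the default
# conv_stride (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 samples per output frame. The same arithmetic standalone:
import functools as _functools
import operator as _operator

_conv_stride = (5, 2, 2, 2, 2, 2, 2)
_ratio = _functools.reduce(_operator.mul, _conv_stride, 1)
assert _ratio == 320  # one logit per 320 input samples (~20 ms at 16 kHz)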
| 673 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
    def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained( self ):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
    def test_picklable_without_disk( self ):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols = """Hello World!"""
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        """simple docstring"""
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
UpperCAmelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
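# The `tokenizer.fairseq_offset` arithmetic exercised in the tests above exists because
# XLM-R's fairseq vocab prepends <s>/<pad>/</s>/<unk> before the SentencePiece ids.
# A sketch of the shift, assuming offset 1 as in the test fixture:
def spm_id_to_fairseq_id(spm_id, fairseq_offset=1):
    # every SentencePiece id is shifted up to make room for the control tokens
    return spm_id + fairseq_offset

assert [spm_id_to_fairseq_id(i) for i in [8, 21, 84]] == [9, 22, 85]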
| 673 | 1 |
"""simple docstring"""
def reverse_words(input_str : str ):
    """simple docstring"""
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
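# Example usage of the word-reversal helper above:
print(reverse_words("I love Python"))  # -> "Python love I"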
| 28 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_file_path ):
    """simple docstring"""
    print("""Loading config file...""" )
    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_file_path , """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_file_path , str(exc ) ) )
    return config
def get_mobilevitva_config(task_name , orig_cfg_file ):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 2_10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            config.image_size = 3_84
        else:
            config.image_size = 2_56
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 1_51
        config.image_size = 5_12
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 5_12
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if F"layer_{i}." in k:
                k_new = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if F"layer_{i}.0." in k:
                k_new = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if F"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if F"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if F"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if F"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict ):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into the MobileViTV2 structure.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
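# Illustrative command-line invocation (the script name and all paths below are
# placeholders, not files shipped with this snippet):
#
#   python convert_mobilevitv2_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k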
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # this scheduler does not rescale its inputs
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
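# Illustrative usage sketch of the scheduler in a denoising loop; `denoiser` is
# a hypothetical stand-in for any callable returning a tensor shaped like
# `sample` (it is not defined in this module):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = denoiser(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample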
| 63 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 402 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
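# Quick sanity check of the closed form S_n = n/2 * (2*a1 + (n - 1)*d):
# with a1=1, d=10, n=100 we get 50 * (2 + 990) = 49600, matching the doctest.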
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """
    Configuration class for the TrOCR decoder. Instantiating it with the default
    arguments yields a configuration similar to microsoft/trocr-base-handwritten.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
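# Illustrative usage sketch (not part of the original module): building a
# smaller decoder configuration and persisting it to disk.
#
#   config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
#   config.save_pretrained("./my-trocr-config")  # writes config.json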
| 588 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol.
    """

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
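    # Illustrative end-to-end exchange between two parties (group 14):
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared  # both sides derive the same SHA-256 digest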
| 17 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit, as implemented in the original Google BERT repo."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based approximation of GeLU (see https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GeLU with outputs clipped to [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves across `axis` and gate one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
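# Illustrative lookup (this mirrors how model code resolves the activation
# string stored in a config):
#
#   act_fn = get_tf_activation("gelu_new")
#   y = act_fn(tf.constant([-1.0, 0.0, 1.0]))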
| 525 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 92 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
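# Worked example: with num_labels=3 and ignore_index=255,
#   intersect_and_union(np.array([[0, 1]]), np.array([[0, 2]]), 3, 255)
# returns area_intersect=[1, 0, 0], area_union=[1, 1, 1],
# area_pred_label=[1, 1, 0] and area_label=[1, 0, 1]: only the pixel
# predicted as class 0 matches its ground truth.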
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate mean Intersection-over-Union (mIoU) and accuracy metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 92 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 34 |
def simplify(current_set: list[list]) -> list[list]:
    """
    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
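# Worked example: for x + 2y = 3 and 4x + 5y = 6, simplify() forward-eliminates
# to [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]; back-substitution then yields
# [-1.0, 2.0], matching the doctest above.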
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 551 | 0 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32') | 55 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 65 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
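# Worked example: with chinese_word_set = {"你好"} and
# bert_tokens = ["你", "好", "world"], add_sub_symbol returns
# ["你", "##好", "world"]; continuation pieces of a whole Chinese word are
# marked with the "##" prefix so they can be masked together later.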
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
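# Note on the output format: each entry of `ref_ids` lists, for one line, the
# positions of tokens that are "##"-prefixed single Chinese characters, i.e.
# continuation pieces of a whole word. Whole-word masking consumes these
# positions to mask entire words at once.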
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__UpperCAmelCase : str = parser.parse_args()
main(args) | 241 | 0 |
from ..utils import DummyObject, requires_backends


# NOTE: the thirteen class names in this dummy module were lost in this copy, so
# distinct placeholder names are generated below. Every dummy class follows the
# same pattern: raise a helpful error unless the `flax` backend is installed.
def _create_flax_dummy_class(name):
    class _FlaxDummy(metaclass=DummyObject):
        _backends = ["flax"]

        def __init__(self, *args, **kwargs):
            requires_backends(self, ["flax"])

        @classmethod
        def from_config(cls, *args, **kwargs):
            requires_backends(cls, ["flax"])

        @classmethod
        def from_pretrained(cls, *args, **kwargs):
            requires_backends(cls, ["flax"])

    _FlaxDummy.__name__ = name
    return _FlaxDummy


for _name in [f"FlaxDummyObject{i}" for i in range(13)]:  # placeholder names
    globals()[_name] = _create_flax_dummy_class(_name)
| 720 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    # ceil-divide by scale_factor**2, then scale back up by scale_factor
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
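

# Quick sanity check of the rounding behaviour (values are illustrative only):
#   get_new_h_w(768, 768) == (96, 96)     # 768 is divisible by 8**2 = 64
#   get_new_h_w(770, 770) == (104, 104)   # 770 is not, so the result rounds up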
class KandinskyPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
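
    # Shape sketch under classifier-free guidance (numbers are illustrative):
    # with batch_size=1 and num_images_per_prompt=2, the returned prompt_embeds
    # stacks unconditional embeddings in front of conditional ones, so its leading
    # dimension is 2 * batch_size * num_images_per_prompt = 4.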
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 160 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM needs far fewer inference steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop step by step; only deterministic DDIM supports this."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
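
# Minimal slerp usage sketch (shapes are illustrative): interpolate two noise
# tensors half-way and pass the result to the pipeline as `noise`.
#   noise_a = torch.randn(1, 1, 256, 256)
#   noise_b = torch.randn(1, 1, 256, 256)
#   midpoint = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)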
| 171 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 0 |
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the last index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Returns the text index of the right-most mismatch when the pattern is
        aligned at ``current_pos``, or -1 if the pattern matches completely.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the positions of all matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
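
# For the demo inputs above the script prints:
#   Pattern found in following positions:
#   [0, 3]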
| 144 |
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        # shift every channel value by `level`; written out this way, the formula
        # 128 + level + (c - 128) simplifies to c + level
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 144 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list:
'''simple docstring'''
_lowerCamelCase : Tuple = []
_lowerCamelCase, _lowerCamelCase : List[Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_lowerCamelCase : int = result + left + right
return input_list
def lowerCamelCase_( _lowerCamelCase ) -> list:
'''simple docstring'''
if len(_lowerCamelCase ) <= 1:
return input_list
_lowerCamelCase : Dict = list(_lowerCamelCase )
# iteration for two-way merging
_lowerCamelCase : Dict = 2
while p <= len(_lowerCamelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ):
_lowerCamelCase : int = i
_lowerCamelCase : Union[str, Any] = i + p - 1
_lowerCamelCase : Optional[Any] = (low + high + 1) // 2
_lowerCamelCase : Union[str, Any] = merge(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# final merge of last two parts
if p * 2 >= len(_lowerCamelCase ):
_lowerCamelCase : Optional[int] = i
_lowerCamelCase : Any = merge(_lowerCamelCase , 0 , _lowerCamelCase , len(_lowerCamelCase ) - 1 )
break
p *= 2
return input_list
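

# Worked example: iter_merge_sort([9, 1, 4, 2]) first merges adjacent pairs into
# [1, 9] and [2, 4], then merges the two halves, returning [1, 2, 4, 9].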
if __name__ == "__main__":
_lowerCAmelCase : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
if user_input == "":
_lowerCAmelCase : List[Any] = []
else:
_lowerCAmelCase : str = [int(item.strip()) for item in user_input.split(''',''')]
print(iter_merge_sort(unsorted)) | 46 |
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """
    Returns the sum of all numbers that equal the sum of the factorials of
    their digits (Project Euler 34). 7 * 9! + 1 is a safe upper bound: an
    8-digit number can contribute at most 8 * 9! = 2,903,040, which has only
    7 digits, so no candidate with 8 or more digits exists.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
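
# The known result for Project Euler problem 34 is 40730: the only numbers equal
# to the sum of the factorials of their digits are 145 and 40585.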
| 563 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
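

# Illustrative rename: rename_key("ln_final.weight") returns
# "text_model.final_layer_norm.weight".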
def convert_state_dict(orig_state_dict, config):
    # NOTE: the assignment targets in this loop were lost in this copy; the
    # state-dict keys below are reconstructed to follow the HF X-CLIP naming
    # scheme, splitting the fused `attn.in_proj` tensors into q/k/v projections.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    prefix = f"vision_model.encoder.layers.{layer_num}.message_attn"
                else:
                    prefix = f"vision_model.encoder.layers.{layer_num}.self_attn"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"

            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
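# Once converted (and optionally pushed), the checkpoint is consumed like any
# other Hub model. Sketch only — "microsoft/xclip-base-patch32" is the
# officially hosted conversion, not necessarily the output of this run:
#
#   from transformers import XCLIPModel, XCLIPProcessor
#   model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
#   processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")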
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 463 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates onto the kernel axes
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: Gaussian envelope times a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
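# OpenCV ships an equivalent generator, which makes a handy cross-check for the
# hand-rolled kernel above. Sketch only — note cv2.getGaborKernel takes the
# size as a tuple and theta in radians, not degrees:
#
#   import cv2
#   reference = cv2.getGaborKernel((ksize, ksize), sigma, theta / 180 * np.pi, lambd, gamma, psi)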
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 463 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
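# For reference, the pipeline under test is driven the same way outside the
# test suite; a minimal sketch (weights download from the Hub on first use):
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]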
| 43 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant; window_size is the
        # neighbourhood considered around each pixel
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
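# OpenCV computes the same response map natively, which is a useful sanity
# check for the loop above. Sketch only — blockSize plays the role of
# window_size and ksize is the Sobel aperture used for the gradients:
#
#   dst = cv2.cornerHarris(img, blockSize=3, ksize=3, k=0.04)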
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 511 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
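# Outside the test harness the pipeline is used the same way; a minimal sketch
# with a public OWL-ViT checkpoint (downloaded from the Hub on first use):
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#            candidate_labels=["cat", "remote"])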
| 720 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    # Euclidean distance between two points
    return np.linalg.norm(np.array(a) - np.array(b))
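# Quick sanity check for the helper above (a 3-4-5 right triangle):
assert euclidean_distance([0, 0], [3, 4]) == 5.0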
def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
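# The prediction can be cross-checked against scikit-learn's built-in
# estimator; a sketch, not part of the original script:
def sklearn_classifier(train_data, train_target, classes, point, k=5):
    from sklearn.neighbors import KNeighborsClassifier

    knn = KNeighborsClassifier(n_neighbors=k).fit(train_data, train_target)
    return classes[knn.predict([point])[0]]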
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 452 | 0 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
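# Property-based sanity check for the hybrid sort above: on random input it
# must agree with Python's built-in sorted(). Sketch, not in the original:
def _self_test(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 200))]
        assert sort(list(data)) == sorted(data)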
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = input('Enter numbers separated by a comma : ').strip()
_UpperCamelCase = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 459 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'vocab.json'}
_UpperCamelCase = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_UpperCamelCase = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
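# Usage sketch: MGP-STR tokenizes scene-text strings one character at a time,
# so every character must appear in vocab.json. The checkpoint name below is
# assumed from the URL map at the top of this file:
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   tokenizer("hello")  # one id per character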
| 459 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
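# With this pattern nothing heavy is imported at package-import time: the first
# attribute access resolves through _LazyModule, which imports the submodule
# named in _import_structure on demand. Sketch:
#
#   from transformers.models.funnel import FunnelConfig  # submodule loaded lazily here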
| 478 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a human-readable version of one batch."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)

    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
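# Example invocation (sketch; paths and hyperparameters are placeholders, and
# the generic flags come from lightning_base's add_generic_args):
#
#   python finetune.py --data_dir ./cnn_dm --model_name_or_path t5-small \
#       --output_dir ./outputs --task summarization --do_predict \
#       --train_batch_size 8 --eval_batch_size 8 --n_val 500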
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase = parser.parse_args()
main(args)
| 478 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs, ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
@classmethod
def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output | 32 |
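# Editor's sketch (added; not part of the original file): composing a BlipConfig
# from explicit sub-configs via the BlipConfig.from_text_vision_configs classmethod
# restored above:
#   config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())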
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below ("memorry") is kept exactly as in the original file
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
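# --- Editor's sketch (added; not part of the original file). Exercises the
# validation above: a valid `rope_scaling` dict carries a "type" from
# {"linear", "dynamic"} and a float "factor" > 1.0.
if __name__ == "__main__":
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
    try:
        OpenLlamaConfig(rope_scaling={"type": "static", "factor": 2.0})
    except ValueError as err:
        print(err)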
| 358 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
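# Editor's note (added): enable_sequential_cpu_offload() keeps only the submodule
# currently executing on the GPU and moves everything else back to CPU, which is
# what lets the 768x768 generation above fit under the 7 GB budget asserted here.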
| 715 |
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
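# Editor's sanity check (added): F(12) = 144 is the first Fibonacci number with
# three digits, so solution(3) == 12; the published Project Euler 25 answer for
# the default n = 1000 is 4782.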
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 383 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 451 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
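# Editor's sketch (added; not part of the original file): the dynamic axes above
# tell the ONNX exporter which dimensions may vary at runtime, e.g.
#   onnx_config = DistilBertOnnxConfig(DistilBertConfig())
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask']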
| 172 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    main()
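# Editor's note (added): typical invocations dispatched by main() above, one per
# subparser registered in the function:
#   accelerate config    # -> get_config_parser
#   accelerate env       # -> env_command_parser
#   accelerate launch train.py --num_processes 2    # -> launch_command_parser
#   accelerate test      # -> test_command_parser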
| 430 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
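# Editor's example invocation (added; the script file name and both paths are
# placeholders):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-hf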
| 430 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
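# Editor's example invocation (added; the script name and model ids are
# illustrative -- any compatible encoder/decoder pair works):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2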
if __name__ == "__main__":
    main()
| 626 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
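# Editor's note (added): with the default signals above ([2, 1, 2, -1] and
# [1, 2, 3, 4]), circular_convolution() returns [10.0, 10.0, 6.0, 14.0].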
if __name__ == "__main__":
    doctest.testmod()
| 626 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
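# Editor's sketch (added; not part of the original file): a typical round trip with
# the fast tokenizer above (downloads the real camembert-base checkpoint):
#   tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#   ids = tok("J'aime le camembert !")["input_ids"]
#   tok.decode(ids)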
| 332 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
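# Editor's demo (added; not part of the original file): hydrogen (2.016 g/mol)
# vs. oxygen (32.00 g/mol). By Graham's law the lighter gas effuses faster by
# sqrt(32.00 / 2.016), roughly 3.984x:
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.00))  # ~3.984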
| 332 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def A_ ( A__ , A__=False , A__=False ) -> int:
a__ : int = 'backbone.' if is_semantic else ''
a__ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', 'beit.embeddings.cls_token'),
(F'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'{prefix}pos_embed', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def A_ ( A__ , A__ , A__=False , A__=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
a__ : Optional[int] = 'backbone.' if is_semantic else ''
# queries, keys and values
a__ : Any = state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' )
a__ : Any = state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' )
a__ : Optional[int] = state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' )
a__ : Any = in_proj_weight[
: config.hidden_size, :
]
a__ : str = q_bias
a__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
a__ : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
a__ : List[str] = state_dict.pop(F'{prefix}blocks.{i}.gamma_1' )
a__ : Dict = state_dict.pop(F'{prefix}blocks.{i}.gamma_2' )
a__ : Optional[Any] = gamma_a
a__ : str = gamma_a
def A_ ( A__ , A__ , A__ ) -> Any:
a__ : Union[str, Any] = dct.pop(A__ )
a__ : Tuple = val
def A_ ( ) -> int:
a__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ : List[str] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def A_ ( A__ , A__ , A__=False ) -> Tuple:
a__ : str = False if 'rvlcdip' in checkpoint_url else True
a__ : int = BeitConfig(use_absolute_position_embeddings=A__ , use_mask_token=A__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
a__ : Tuple = 1024
a__ : Optional[Any] = 4096
a__ : Optional[int] = 24
a__ : Optional[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
a__ : Tuple = 16
a__ : List[str] = 'huggingface/label-files'
a__ : Any = 'rvlcdip-id2label.json'
a__ : Optional[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
a__ : Any = {int(A__ ): v for k, v in idalabel.items()}
a__ : str = idalabel
a__ : Any = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
a__ : Tuple = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )['model']
a__ : Union[str, Any] = create_rename_keys(A__ , has_lm_head=A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , has_lm_head=A__ )
# load HuggingFace model
a__ : Any = BeitForMaskedImageModeling(A__ ) if has_lm_head else BeitForImageClassification(A__ )
model.eval()
model.load_state_dict(A__ )
# Check outputs on an image
a__ : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=A__ )
a__ : str = prepare_img()
a__ : List[str] = image_processor(images=A__ , return_tensors='pt' )
a__ : Any = encoding['pixel_values']
a__ : int = model(A__ )
a__ : Optional[int] = outputs.logits
# verify logits
a__ : List[str] = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(A__ ), "Shape of logits not as expected"
Path(A__ ).mkdir(exist_ok=A__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
if has_lm_head:
a__ : Dict = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
a__ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=A__ , )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=A__ , )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
lowercase : str = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
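# Editor's example invocation (added; the script file name is illustrative). The
# default --checkpoint_url above converts the self-supervised DiT-base weights;
# pointing it at an "rvlcdip" checkpoint URL instead yields the 16-class document
# classifier:
#   python convert_dit_to_pytorch.py --pytorch_dump_folder_path ./dit-base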
| 302 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    width, height = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(width):
        for j in range(height):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
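# Editor's note (added): for 8-bit images the per-pixel loop in convert_to_negative
# is equivalent to the vectorized NumPy expression `255 - img`, which is much faster.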
| 302 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
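# Editor's check (added): sigmoid squashes inputs into the open interval (0, 1):
#   sigmoid(np.array([-1.0, 0.0, 1.0])) -> array([0.26894142, 0.5, 0.73105858])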
| 709 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
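# Editor's sketch (added; not part of the original file): reloading the tiny
# checkpoint written above:
#   model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")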
| 168 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (3, 3_2, 1_2_8)
snake_case__ : List[Any] = tempfile.mkdtemp()
# fmt: off
snake_case__ : str = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case__ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
snake_case__ : Optional[int] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 3_2, """width""": 1_2_8},
}
snake_case__ : str = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : Optional[int] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.get_tokenizer()
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : Optional[int] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
snake_case__ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : List[Any] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
snake_case__ : Optional[int] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : Optional[Any] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.prepare_image_inputs()
snake_case__ : Tuple = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : Optional[int] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : int = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = """test"""
snake_case__ : Optional[Any] = processor(text=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Any = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : int = """test"""
snake_case__ : Any = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : List[str] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Any = processor.char_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Optional[int] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : int = None
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Optional[Any] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = torch.randn(1 , 2_7 , 3_8 )
snake_case__ : Dict = torch.randn(1 , 2_7 , 5_0_2_5_7 )
snake_case__ : Union[str, Any] = torch.randn(1 , 2_7 , 3_0_5_2_2 )
snake_case__ : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 38 |
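# Editor's note (added): the three logit tensors decoded above mirror MGP-STR's
# three recognition heads -- a 38-symbol character head, a BPE head over the
# 50257-token GPT-2 vocab, and a wordpiece head over the 30522-token BERT vocab.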
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]=[1_0, 2_0, 3_0, 4_0] , SCREAMING_SNAKE_CASE_ : Tuple=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]="relu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , ):
_a = parent
_a = batch_size
_a = image_size
_a = num_channels
_a = embeddings_size
_a = hidden_sizes
_a = depths
_a = is_training
_a = use_labels
_a = hidden_act
_a = num_labels
_a = scope
_a = len(SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : int ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Optional[Any] ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
_a = TFResNetModel(config=SCREAMING_SNAKE_CASE_ )
_a = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
_a = self.num_labels
_a = TFResNetForImageClassification(SCREAMING_SNAKE_CASE_ )
_a = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self : List[str] ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_A = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def _UpperCAmelCase ( self : List[str] ):
_a = TFResNetModelTester(self )
_a = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : Any ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _UpperCAmelCase ( self : int ):
pass
def _UpperCAmelCase ( self : int ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(SCREAMING_SNAKE_CASE_ )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Optional[int] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
_a = model_class(SCREAMING_SNAKE_CASE_ )
_a = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a = layer_type
_a = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _UpperCAmelCase ( self : str ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase ( self : Optional[int] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' )
# forward pass
_a = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
_a = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
_a = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 562 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE : Tuple = TypeVar('''T''')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
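    # Editor's note (added): refer() costs O(n) per access because deque.remove()
    # and pop() scan the deque; an O(1) LRU pairs a hash map with a doubly linked
    # list instead (cf. functools.lru_cache).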
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 713 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
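# Editor's check (added): a 3-4-5 right triangle of resistance and reactance gives
# |Z| = 5, i.e. electrical_impedance(3, 4, 0) -> {'impedance': 5.0}.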
| 623 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase_ : Tuple = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
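# Illustrative invocation (a sketch; the paths below are placeholders, not
# files shipped with this script):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/fairseq/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted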
| 24 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]) -> torch.Tensor:
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]) -> torch.Tensor:
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Pipeline for image inpainting using the RePaint resampling schedule."""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
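# Illustrative usage sketch (assumes a RePaint-compatible checkpoint such as
# "google/ddpm-ema-celebahq-256"; `original_image` and `mask` are placeholder
# 256x256 PIL images supplied by the caller):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
#   output.images[0].save("inpainted.png")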
| 24 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 719 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix into lower and upper factors."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
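    # Quick sanity check (a sketch): for A = [[2, 1], [4, 3]] the Doolittle
    # factorization gives L = [[1, 0], [2, 1]] and U = [[2, 1], [0, 1]].
    lower_mat, upper_mat = lower_upper_decomposition(np.array([[2, 1], [4, 3]]))
    print(lower_mat)
    print(upper_mat)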
| 111 | 0 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
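if __name__ == "__main__":
    # Illustrative check (a sketch): the ciphertext below is "the quick brown
    # fox jumps over the lazy dog" Caesar-shifted by 3; the chi-squared search
    # should recover both the shift and the plaintext.
    shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared(
        "wkh txlfn eurzq ira mxpsv ryhu wkh odcb grj"
    )
    print(shift, decoded)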
| 422 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching (2-approximation)."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 422 | 1 |
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n that have the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 711 | """simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
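# Illustrative usage (a sketch): each call returns None when the requirement is
# satisfied and raises ImportError (or PackageNotFoundError) otherwise.
#
#   require_version("numpy>=1.17,<2.0")
#   require_version("python>=3.8")
#   require_version_core("tokenizers")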
| 342 | 0 |
'''simple docstring'''
import os
def solution() -> int:
    """Return the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 435 | '''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCAmelCase_ : List[str] = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 435 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
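# Note (assumption about the test environment): the integration test above is
# gated behind @slow, so it only runs when RUN_SLOW=1 is set, e.g.
#   RUN_SLOW=1 pytest tests/models/layoutlmv3/test_modeling_layoutlmv3.py -k inference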
| 682 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
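# Illustrative usage (a sketch; requires the `sentencepiece` package and
# network access to download the checkpoint):
#
#   from transformers import M2M100Tokenizer
#   tokenizer = M2M100Tokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#   )
#   model_inputs = tokenizer("Hello world!", return_tensors="pt")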
| 682 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 241 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 73 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital") | 266 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
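# Quick sanity check on the table above: each group's prime has the advertised size, e.g.
#
#   assert primes[14]["prime"].bit_length() == 2048
#   assert primes[18]["prime"].bit_length() == 8192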
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
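# Minimal round-trip sketch (illustrative variable names): two parties exchange
# hex-encoded public keys and must derive the same SHA-256 shared secret.
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   alice_pub, bob_pub = alice.generate_public_key(), bob.generate_public_key()
#   assert alice.generate_shared_key(bob_pub) == bob.generate_shared_key(alice_pub)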
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
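# e.g. _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]})
# returns [torch.Size([2, 3]), torch.Size([4])]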
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
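# Worked example: with dims (2, 4), flat index 5 unravels to (1, 1),
# since 5 % 4 == 1 fixes the last dimension and 5 // 4 == 1 fixes the first.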
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produces an ordered sequence of slices that, applied to a tensor of shape `dims`, covers exactly the
    region between the (inclusive) coordinates `start` and `end`.
    """

    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
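# e.g. for t of shape (2, 4, 5) with no_batch_dims=2 (a flat batch of 8 elements),
# _chunk_slice(t, flat_start=3, flat_end=7, no_batch_dims=2) gathers flat batch
# elements 3..6 into a tensor of shape (4, 5).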
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    # Runs `layer` on `chunk_size`-sized slices of the flattened batch dimensions, so only
    # one chunk of activations is resident at a time.
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
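# Minimal usage sketch (hypothetical layer): run a pointwise layer over a large batch a few
# rows at a time; the chunked result matches the unchunked call.
#
#   layer = lambda x: {"out": x * 2}
#   x = torch.randn(16, 8)
#   y = chunk_layer(layer, {"x": x}, chunk_size=4, no_batch_dims=1)
#   assert torch.allclose(y["out"], x * 2)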
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
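# Sketch of the intended use (hypothetical `my_chunked_fn`): the tuner binary-searches the
# largest power-of-two chunk size that runs without a RuntimeError (e.g. CUDA OOM), then
# caches the result until the argument shapes change:
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(my_chunked_fn, (inputs,), min_chunk_size=1)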
| 78 | 0 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Compute the missing one of the three concentrations (pass the unknown as 0) via the mass-action law n * p = n_i**2."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
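# Worked example of the mass-action law n * p = n_i**2: with hole_conc=100 and
# intrinsic_conc=50, the missing electron concentration is 50**2 / 100 = 25:
#
#   carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50)
#   # -> ('electron_conc', 25.0)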
if __name__ == "__main__":
import doctest
doctest.testmod() | 484 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, and the lines around it."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as markdown links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check that the model list in `task_guide` is up to date and maybe `overwrite` it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 484 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
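# How the three helpers above fit together (a quick illustrative sketch):
#
#   model, optimizer, scheduler, train_dl, valid_dl = create_components()
#   sig = get_signature(model)        # scalar fingerprint of the current weights
#   load_random_weights(model)        # perturb the weights
#   assert abs(sig - get_signature(model)) > 1e-3   # the fingerprint detects the change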
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_cuda_flag(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with the BNB library."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that the accelerator errors out if a bnb model offloaded to the CPU is prepared."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that the accelerator errors out when a bnb model is split across GPUs under distributed training."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_device(self):
        """Tests that the accelerator can prepare a bnb model split across devices when not distributed."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 413 |
def remove_digit(num: int) -> int:
    """Return the biggest result achievable by removing one digit from the given number."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 413 | 1 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation for the minimum vertex cover problem."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
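# e.g. for the sample graph below, the greedy cover works out to {0, 1, 2, 4}:
# vertex 2 (degree 3) is taken first, then 0, 1 and 4 while uncovered edges remain.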
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 107 | from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
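# Illustrative usage sketch (editorial addition, not part of the original module):
#
#   controlnet = FlaxControlNetModel()  # default SD-sized configuration
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)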
| 107 | 1 |
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
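    # Quick sanity check (editorial addition): by the ideal gas law PV = nRT with
    # R = 0.0821 L*atm/(mol*K), 1 mol at 300 K in a 10 L vessel exerts about
    # 2.463 atm, which moles_to_pressure rounds to 2.
    assert moles_to_pressure(volume=10, moles=1, temperature=300) == 2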
| 669 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
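    # Editorial sanity check: the sieve itself is easy to verify on a small bound.
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]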
| 190 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The output of PriorTransformer."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full([num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
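    # Editorial note: post_process_latents undoes the normalization applied to the
    # CLIP image embeddings during training; clip_mean and clip_std are learned
    # per-dimension parameters of shape (1, clip_embed_dim), so the expression
    # broadcasts over the batch dimension.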
| 174 |
import requests
giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
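    # Editorial aside: manually "+"-joining the query works, but letting requests
    # build the query string also handles characters that need URL-escaping, e.g.:
    #   requests.get("https://api.giphy.com/v1/gifs/search",
    #                params={"q": query, "api_key": api_key})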
| 174 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 498 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Calculate the product of all the digits in a string of digits."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''') | 66 |
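    # Editorial note on the windowing trick above: the current 13-digit window
    # slides forward one digit at a time while each incoming digit is at least
    # as large as the one falling out; only when that stops does the window's
    # product get evaluated, and the search jumps 13 digits ahead.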
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")
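# Editorial examples: password_generator(12) returns a random 12-character string
# drawn from letters, digits and punctuation, and is_strong_password("Abc123!?")
# is True because all four character classes are present and the length is >= 8.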
if __name__ == "__main__":
    main()
| 66 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 32 |
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 514 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowercase_ = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to max_width characters."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, target_metric_key, metric_keys, verbose, output_dir):
    # enable the next block to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, target_metric_key, metric_keys, verbose, output_dir
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def lowerCAmelCase ():
"""simple docstring"""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F'''
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 352 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if the pattern occurs anywhere in the text, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
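# Editorial note: the rolling hash is what makes this Rabin-Karp rather than
# naive search; each window hash is derived from the previous one in O(1), so
# the average running time is O(len(text) + len(pattern)) with direct string
# comparison performed only on hash hits, e.g. rabin_karp("abc", "zabcz") is True.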
| 352 | 1 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
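# Editorial note: despite its merge_sort name, this is a min-max selection
# scheme: each pass removes the smallest and largest remaining elements, so
# e.g. merge_sort([3, 1, 2]) returns [1, 2, 3], in O(n^2) time overall.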
| 89 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
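        # Editorial note: loss is the mean cross-entropy per target token, so
        # -(labels.shape[-1] * loss) approximates the total log-likelihood of
        # the target sequence, which is what the expected score encodes.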
| 316 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
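        # Editorial note: the ONNX diffusers pipelines seed via numpy
        # (np.random.RandomState(0)) rather than a torch.Generator, which is
        # why the expected slices above are reproducible on the same provider.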
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase : Optional[Any] = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
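    # Editorial note: with this pattern the top-level import of the package is
    # cheap; _LazyModule only imports a submodule such as modeling_biogpt the
    # first time one of its attributes (e.g. BioGptModel) is accessed.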
| 316 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 652 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
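# Tiny illustration (editorial addition) of trim_batch: columns consisting
# solely of the pad token are dropped.
#
#   ids = torch.tensor([[5, 4, 0], [5, 6, 0]])
#   trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 4], [5, 6]])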
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
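# Illustrative usage sketch (not part of the original file); the data
# directory and checkpoint name are assumptions:
#
#   >>> from torch.utils.data import DataLoader
#   >>> tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   >>> ds = Seq2SeqDataset(tokenizer, "path/to/data", max_source_length=128,
#   ...                     max_target_length=32, type_path="train")
#   >>> loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)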
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
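# Worked example (illustrative): for prediction "the cat sat" and reference
# "cat sat down", normalization drops the article "the", leaving 2 shared
# tokens out of 2 predicted and 3 reference tokens, so precision = 1.0,
# recall = 2/3 and f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.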
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 703 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
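# Illustrative example (not part of the original file): with dims = (3, 4),
# the flat index 7 maps to the row-major index (1, 3), since 7 = 1 * 4 + 3.
#
#   >>> _flat_idx_to_idx(7, (3, 4))
#   (1, 3)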
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
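# Illustrative example (not part of the original file): on a (3, 4) tensor,
# the inclusive flat range from index (0, 2) to index (1, 1) is covered by
# two slices, the tail of row 0 and the head of row 1:
#
#   >>> _get_minimal_slice_set([0, 2], [1, 1], [3, 4])
#   [(slice(0, 1, None), slice(2, 4, None)), (slice(1, 2, None), slice(0, 2, None))]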
@torch.jit.ignore
def _chunk_slice(
    t: torch.Tensor,
    flat_start: int,
    flat_end: int,
    no_batch_dims: int,
) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` over `inputs` in slices of `chunk_size` along the flattened batch dimensions."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
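# Illustrative usage sketch (not part of the original file): run a layer over
# a batch of 16 in chunks of 4 along the single batch dimension.
#
#   >>> import torch
#   >>> layer = lambda x: {"y": x * 2}
#   >>> out = chunk_layer(layer, {"x": torch.ones(16, 8)}, chunk_size=4, no_batch_dims=1)
#   >>> out["y"].shape
#   torch.Size([16, 8])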
class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
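# Illustrative usage sketch (not part of the original file): the tuner probes
# power-of-two chunk sizes with a representative call and caches the largest
# one that does not raise a RuntimeError (e.g. CUDA out-of-memory). `layer`
# refers to the sketch after chunk_layer above and is an assumption.
#
#   >>> tuner = ChunkSizeTuner(max_chunk_size=512)
#   >>> fn = lambda x, chunk_size: chunk_layer(layer, {"x": x}, chunk_size, 1)
#   >>> best = tuner.tune_chunk_size(fn, (torch.ones(16, 8),), min_chunk_size=1)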
| 189 | 0 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
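# Sanity check (illustrative): counting every way to make 200 pence from the
# eight UK coins yields 73682 combinations, the published answer to Project
# Euler problem 31.
#
#   >>> solution(200)
#   73682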
if __name__ == "__main__":
print(solution(int(input().strip())))
| 439 |
import argparse
import os
import re
a_ : List[str] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ : Optional[Any] = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
a_ : Optional[int] = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
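# Illustrative example (not part of the original file): given a mapping such
# as
#
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),
#       ]
#   )
#
# sort_auto_mapping reorders the entries alphabetically by identifier, moving
# "albert" above "bert".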
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
a_ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 439 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
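# Illustrative usage sketch (not part of the original tests): custom stopping
# criteria can be passed straight to `generate`; the checkpoint name is an
# assumption.
#
#   >>> from transformers import AutoModelForCausalLM, AutoTokenizer
#   >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
#   >>> input_ids = tokenizer("Hello", return_tensors="pt").input_ids
#   >>> criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   >>> output = model.generate(input_ids, stopping_criteria=criteria)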
| 271 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 271 | 1 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase):
@cached_property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__snake_case = model(**lowerCamelCase__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__snake_case = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
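

# Added usage sketch (not part of the original test file): the same
# preprocessing and forward pass outside unittest. "microsoft/cvt-13" is the
# first entry of CVT_PRETRAINED_MODEL_ARCHIVE_LIST; the fixture path is reused.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, CvtForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1).item())])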
| 24 |
"""Distinct ways to climb an n-step staircase, taking 1 or 2 steps at a time."""


def climb_stairs(number_of_steps: int) -> int:
    """
    LeetCode No. 70: Climbing Stairs

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
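    # Added sanity check (not in the original file): climb_stairs follows the
    # Fibonacci recurrence, so the first few values are easy to verify by hand.
    assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]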
| 325 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
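

# Added illustration (not part of the original tests; the tiny sizes are
# arbitrary): rope_scaling, as exercised by test_model_rope_scaling above, is
# set as a dict on the config before the model is instantiated.
def _rope_scaling_example():
    config = OpenLlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}
    return OpenLlamaModel(config)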
| 95 |
"""simple docstring"""
import functools
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
_UpperCAmelCase = set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
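    # Added sample run (not in the original file; LeetCode 983's first example):
    # a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on
    # day 20 cost 2 + 7 + 2 = 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11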
| 95 | 1 |
"""Build a bare-minimum single-qubit circuit, run it 1000 times on the Aer
simulator, and return the histogram of observed states."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 135 |
"""Tests for the slow and fast LED tokenizers."""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 135 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
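

# Added worked example (not part of the metric script): tiny arrays whose
# per-category areas are easy to verify by hand.
if __name__ == "__main__":
    pred = np.array([[0, 1], [1, 1]])
    gt = np.array([[0, 1], [0, 1]])
    result = mean_iou(results=[pred], gt_seg_maps=[gt], num_labels=2, ignore_index=255)
    # class 0: intersect 1 / union 2 -> 0.5; class 1: intersect 2 / union 3 -> 2/3
    print(result["per_category_iou"])  # -> approximately [0.5, 0.66666667]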
| 500 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 0
while number > 0:
A__ = number % 10
sum_of_digits += last_digit
A__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _snake_case ( UpperCAmelCase_ : int = 100 ):
A__ = factorial(UpperCAmelCase_ )
A__ = split_and_add(UpperCAmelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
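    # Added cross-check (not in the original file): the same digit sum computed
    # via string conversion of math.factorial should agree with split_and_add.
    import math

    assert solution(100) == sum(int(digit) for digit in str(math.factorial(100)))  # both give 648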
| 500 | 1 |
"""
Preprocessing script before distillation: tokenize a text corpus and dump the
token ids to a pickle file.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
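

# Added usage sketch (not part of the original script): the dump written above
# can be read back with pickle; each entry is a numpy array of token ids. The
# default path assumes the default --dump_file and --tokenizer_name values.
def load_binarized(path: str = "data/dump.bert-base-uncased.pickle"):
    with open(path, "rb") as f:
        return pickle.load(f)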
| 61 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the left and right halves individually, then merge them back into input_list."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Return a sorted copy of input_list, using iterative (bottom-up) merge sort.

    >>> iter_merge_sort([4, 1, 2, 9])
    [1, 2, 4, 9]
    >>> iter_merge_sort([6, 1, 3])
    [1, 3, 6]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
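    # Added quick check (not in the original file): iter_merge_sort should agree
    # with Python's built-in sorted() on any input list.
    assert iter_merge_sort([5, 3, 8, 1, 2]) == sorted([5, 3, 8, 1, 2])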
| 61 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr does not save any results yet, so all we check for now is
        # that the subprocess did not fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # explicitly set --num_nodes=1 just in case these tests end up run on a
        # multi-node setup, which they cannot handle
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 712 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack;
                # the "(" guard prevents a KeyError when an open parenthesis is
                # still on the stack
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed equation, then reverse the postfix result
    return (infix_2_postfix("".join(reversed_infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
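    # Added sample (not in the original file): "a*(b+c)" should convert to the
    # prefix form "*a+bc"; the call below also prints its conversion table.
    assert infix_2_prefix("a*(b+c)") == "*a+bc"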