from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
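# Note (explanatory sketch, not part of the original module): because `_LazyModule`
# replaces this module in `sys.modules`, the names listed in `_import_structure`
# are only imported on first attribute access, keeping `import transformers` cheap
# even when optional backends (torch, sentencepiece) are missing. E.g.:
#
#   from transformers.models.speecht5 import SpeechT5Config  # real submodule import happens here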
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
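# Example launch (paths and flags illustrative; the target script must define an
# `_mp_fn(index)` entry point for torch_xla):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports run_glue as a module, rewrites sys.argv to append
# --tpu_num_cores, and spawns `_mp_fn` once per TPU core via xmp.spawn.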
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the rectified linear unit, max(0, x), element-wise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s), i.e. the most frequently occurring value(s), of input_list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
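# Illustrative behaviour (example values, not doctests from the original file):
#   mode([2, 2, 3])    -> [2]
#   mode([1, 1, 2, 2]) -> [1, 2]   # ties: every mode is returned, sorted
#   mode([])           -> []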
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.0_1),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
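# For instance, one of the parametrized cases above expands to (values from the fixtures):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"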
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume the input already packs image and question together
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process the flax key tuple / tensor to match the PyTorch naming conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # the original key transform was lost in extraction; "/" -> "." matches the PyTorch naming
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
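# Rough arithmetic behind the sharding loop above (illustrative numbers):
# a float32 tensor with 1_000_000 elements contributes
#   weight_size = 1_000_000 * dtype_byte_size(torch.float32)  # = 4_000_000 bytes
# and a new shard is cut once current_block_size + weight_size would exceed
# convert_file_size_to_int("10GB") == 10 * 10**9 bytes.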
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
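# Example invocation of the (deprecated) command implemented above, run from
# the root of a `transformers` checkout (illustrative):
#
#   transformers-cli add-new-model
#
# This walks through the cookiecutter questionnaire and scaffolds the model,
# test, and doc files; `transformers-cli add-new-model-like` is the maintained
# replacement.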
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
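
    # Illustration of the contract checked above: pad_across_processes pads every rank's tensor
    # to the largest size across ranks along dim 0, appending zeros (or prepending them when
    # pad_first=True). For example, with 2 processes rank 0 holds shape (2, 10) and rank 1 holds
    # (3, 10); after padding both ranks hold (3, 10), and rank 0's last row is all zeros.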
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
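
    # Follow-up sketch: when more than one shard was written, shard_on_the_fly also saved an index
    # file (transformers' WEIGHTS_INDEX_NAME, i.e. "pytorch_model.bin.index.json"). Reading it back
    # shows which shard holds each parameter; purely illustrative.
    if index is not None:
        with open(os.path.join(args.pytorch_dump_folder_path, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
            saved_index = json.load(f)
        first_key = next(iter(saved_index["weight_map"]))
        print("total size (bytes):", saved_index["metadata"]["total_size"])
        print(first_key, "->", saved_index["weight_map"][first_key])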
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
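

# Sanity sketch (illustrative): instantiate the default config and look at the dynamic axes the
# ONNX export would declare. Class names follow the cleaned-up definitions above.
if __name__ == "__main__":
    config = Data2VecTextConfig()
    print(config.model_type, config.hidden_size)  # data2vec-text 768
    print(dict(Data2VecTextOnnxConfig(config).inputs))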
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
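
    # Usage sketch: fit the SVC above on four linearly separable 2-D points and classify new ones.
    # The printed outputs are what the dual optimisation should produce; treat them as illustrative.
    xs = [
        np.asarray([0.0, 1.0]),
        np.asarray([0.0, 2.0]),
        np.asarray([1.0, 1.0]),
        np.asarray([1.0, 2.0]),
    ]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10)
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.5])))  # expected: 1
    print(svc.predict(np.asarray([1.0, 1.5])))  # expected: -1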
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
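
    # Post-conversion sketch (illustrative; assumes --save_model was passed so the dump folder
    # exists): reload the converted checkpoint and run one forward pass.
    if args.save_model:
        demo_processor = EfficientNetImageProcessor.from_pretrained(args.pytorch_dump_folder_path)
        reloaded = EfficientNetForImageClassification.from_pretrained(args.pytorch_dump_folder_path)
        demo_inputs = demo_processor(images=prepare_img(), return_tensors="pt")
        print(reloaded(**demo_inputs).logits.shape)  # torch.Size([1, 1000])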
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
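
# Invocation sketch (illustrative paths and prompt; requires network access to the LAION knn
# service):
#
#     python retrieve.py --class_prompt "photo of a cat" \
#         --class_data_dir ./real_reg/cat --num_class_images 200
#
# The script then writes caption.txt, urls.txt, images.txt and the downloaded JPEGs under
# ./real_reg/cat/images/.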
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
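
# With the lazy module installed, downstream code imports the public names as usual; a minimal
# sketch (illustrative, requires torch):
#
#     from transformers import Swinv2Config, Swinv2Model
#     model = Swinv2Model(Swinv2Config())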
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
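
    # Post-conversion sketch (illustrative): reload the converted weights and post-process the
    # masks predicted for a single point prompt. "facebook/sam-vit-base" stands in for the local
    # dump folder and is an assumption, not taken from this script.
    #
    #     model = SamModel.from_pretrained("facebook/sam-vit-base")
    #     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    #     image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")  # car image above
    #     inputs = processor(images=np.array(image), input_points=[[[400, 650]]], return_tensors="pt")
    #     with torch.no_grad():
    #         outputs = model(**inputs)
    #     masks = processor.image_processor.post_process_masks(
    #         outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
    #     )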
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for the missing one of force, area, or distance; exactly one argument must be 0."""
if (force, area, distance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if force < 0:
raise ValueError("Magnitude of force can not be negative")
if distance < 0:
raise ValueError("Distance can not be negative")
if area < 0:
raise ValueError("Area can not be negative")
if force == 0:
lowerCamelCase_ : Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCamelCase_ : List[str] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCamelCase_ : Any = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
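
    # Worked example: two plates of area 4 cm^2 (4e-4 m^2) separated by 1 micrometre (1e-6 m).
    # F = (ħ·c·π²·A) / (240·d⁴) ≈ 5.2e-7 N for these values; the numbers are illustrative.
    print(casimir_force(force=0, area=4e-4, distance=1e-6))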
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP tokenizer plus torchvision transforms so that gradients can flow through the
    image preprocessing (resize, crop, normalize) instead of going through a PIL round-trip.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
def UpperCamelCase__ ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=5 , UpperCamelCase=True ) -> List[str]:
__a = []
if output_path is None:
__a = './animation.gif'
if input_path is None:
__a = self.save_path
__a = sorted(glob(input_path + '/*' ) )
if not len(UpperCamelCase ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(UpperCamelCase ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
__a = total_duration / len(UpperCamelCase )
__a = [frame_duration] * len(UpperCamelCase )
if extend_frames:
__a = 1.5
__a = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(UpperCamelCase ) )
imageio.mimsave(UpperCamelCase , UpperCamelCase , duration=UpperCamelCase )
print(f"gif saved to {output_path}" )
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and decode the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
__a = torch.randn_like(self.latent , requires_grad=UpperCamelCase , device=self.device )
__a = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__a = self._add_vector(UpperCamelCase )
__a = loop_post_process(UpperCamelCase )
__a = self._get_CLIP_loss(UpperCamelCase , UpperCamelCase , UpperCamelCase )
print('CLIP loss' , UpperCamelCase )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
wandb.init(reinit=UpperCamelCase , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
__a = Image.open(UpperCamelCase )
__a = image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(UpperCamelCase ) )
def UpperCamelCase__ ( self , UpperCamelCase ) -> List[Any]:
if not prompts:
return []
__a = []
__a = []
if isinstance(UpperCamelCase , UpperCamelCase ):
__a = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(UpperCamelCase , (tuple, list) ):
__a = prompt[0]
__a = float(prompt[1] )
elif ":" in prompt:
__a , __a = prompt.split(':' )
__a = float(UpperCamelCase )
else:
__a = prompt
__a = 1.0
processed_prompts.append(UpperCamelCase )
weights.append(UpperCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase , device=self.device ),
}
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=None , ) -> Any:
if image_path:
__a = self._get_latent(UpperCamelCase )
else:
__a = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase , UpperCamelCase , UpperCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
__a = self.process_prompts(UpperCamelCase )
__a = self.process_prompts(UpperCamelCase )
if save_final and save_path is None:
__a = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(UpperCamelCase ):
os.makedirs(UpperCamelCase )
else:
__a = save_path + '_' + get_timestamp()
os.makedirs(UpperCamelCase )
__a = save_path
__a = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(UpperCamelCase ) )
__a = loop_post_process(UpperCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase , UpperCamelCase , UpperCamelCase ) ):
if show_intermediate:
show_pil(UpperCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(UpperCamelCase )} )
if show_final:
show_pil(UpperCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
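

# Usage sketch for the class above (illustrative paths and prompts; needs a trained VQGAN
# checkpoint). In the original research project the public entry point is generate(), which
# corresponds to the last method above.
#
#     editor = VQGAN_CLIP(iterations=30, lr=0.05,
#                         vqgan_config="logs/vqgan/config.yaml",     # assumed path
#                         vqgan_checkpoint="logs/vqgan/last.ckpt")   # assumed path
#     editor.generate(
#         pos_prompts="a smiling face:1.0|blue eyes:0.5",
#         neg_prompts="glasses",
#         image_path="face.png",
#         show_intermediate=False,
#         save_final=True,
#     )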
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , UpperCamelCase , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
__a = legacy_behaviour
super().__init__(
bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , tokenizer_file=UpperCamelCase , src_lang=UpperCamelCase , tgt_lang=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase , **UpperCamelCase , )
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__a = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a = 1
__a = len(self.sp_model )
__a = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase )
}
__a = {v: k for k, v in self.lang_code_to_id.items()}
__a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__a = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__a = src_lang if src_lang is not None else 'eng_Latn'
__a = self.lang_code_to_id[self._src_lang]
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
__a = [1] * len(self.prefix_tokens )
__a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase )) + ([0] * len(UpperCamelCase )) + suffix_ones
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__a = src_lang
__a = self(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
__a = self.convert_tokens_to_ids(UpperCamelCase )
__a = tgt_lang_id
return inputs
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , UpperCamelCase ) -> List[str]:
return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , UpperCamelCase ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , 'wb' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = "eng_Latn" , UpperCamelCase = None , UpperCamelCase = "fra_Latn" , **UpperCamelCase , ) -> BatchEncoding:
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
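

# Usage sketch (illustrative): tokenize an English→French pair with the language codes handled
# above. The checkpoint id is the distilled 600M model referenced in this file.
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")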
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
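

# Quick sketch: unlike most configs, the layer counts live in a dict keyed by modality.
#
#     config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#     print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}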
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class _a (_BaseExamplesIterable ):
'''simple docstring'''
def __init__( self , A__ , A__=None , ):
A__ : List[str] = df
A__ : Optional[int] = partition_order or range(self.df.rdd.getNumPartitions() )
A__ : Tuple = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def __A ( self , A__ ):
A__ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(A__ )
return SparkExamplesIterable(self.df , partition_order=A__ )
def __A ( self , A__ , A__ ):
A__ : Optional[int] = self.split_shard_indices_by_worker(A__ , A__ )
return SparkExamplesIterable(self.df , partition_order=A__ )
@property
def __A ( self ):
return len(self.partition_order )
class _a (datasets.DatasetBuilder ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = SparkConfig
def __init__( self , A__ , A__ = None , A__ = None , **A__ , ):
import pyspark
A__ : Dict = pyspark.sql.SparkSession.builder.getOrCreate()
A__ : int = df
A__ : Any = working_dir
super().__init__(
cache_dir=A__ , config_name=str(self.df.semanticHash() ) , **A__ , )
def __A ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(A__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=A__ )
A__ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(A__ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
A__ : int = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(A__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __A ( self , A__ ):
import pyspark
def get_arrow_batch_size(A__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
A__ : Dict = self.df.count()
A__ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
A__ : Union[str, Any] = (
self.df.limit(A__ )
.repartition(1 )
.mapInArrow(A__ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
A__ : str = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
A__ : Dict = min(A__ , int(approx_total_size / max_shard_size ) )
A__ : int = self.df.repartition(A__ )
def __A ( self , A__ , A__ , A__ , ):
import pyspark
A__ : Optional[int] = ParquetWriter if file_format == """parquet""" else ArrowWriter
A__ : Any = os.path.join(self._working_dir , os.path.basename(A__ ) ) if self._working_dir else fpath
A__ : Union[str, Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
A__ : str = self.config.features
A__ : Union[str, Any] = self._writer_batch_size
A__ : Tuple = self._fs.storage_options
def write_arrow(A__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
A__ : str = pyspark.TaskContext().taskAttemptId()
A__ : str = next(A__ , A__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
A__ : List[Any] = 0
A__ : Optional[Any] = writer_class(
features=A__ , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=A__ , storage_options=A__ , embed_local_files=A__ , )
A__ : Tuple = pa.Table.from_batches([first_batch] )
writer.write_table(A__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
A__ , A__ : Union[str, Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
A__ : Tuple = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=A__ , storage_options=A__ , embed_local_files=A__ , )
A__ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(A__ )
if writer._num_bytes > 0:
A__ , A__ : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(A__ ) ):
A__ : Optional[Any] = os.path.join(os.path.dirname(A__ ) , os.path.basename(A__ ) )
shutil.move(A__ , A__ )
A__ : Tuple = (
self.df.mapInArrow(A__ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __A ( self , A__ , A__ = "arrow" , A__ = None , A__ = None , **A__ , ):
self._validate_cache_dir()
A__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(A__ )
A__ : Any = not is_remote_filesystem(self._fs )
A__ : Optional[int] = os.path.join if is_local else posixpath.join
A__ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
A__ : Any = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
A__ : Any = path_join(self._output_dir , A__ )
A__ : Tuple = 0
A__ : str = 0
A__ : List[Any] = 0
A__ : List[Any] = []
A__ : Optional[Any] = []
for task_id, content in self._prepare_split_single(A__ , A__ , A__ ):
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : List[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(A__ )
A__ : Optional[int] = total_num_examples
A__ : Union[str, Any] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
A__ : int = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A__ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
A__ , A__ , A__ , ):
rename(
A__ , fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , F"""{global_shard_id:05d}""" ).replace("""NNNNN""" , F"""{total_shards:05d}""" ) , )
A__ : List[Any] = []
A__ : Union[str, Any] = 0
for i in range(len(A__ ) ):
A__ , A__ : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(A__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(A__ , len(A__ ) ).map(lambda A__ : _rename_shard(*A__ ) ).collect()
else:
# don't use any pattern
A__ : List[Any] = 0
A__ : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace(A__ , """""" ) , )
def __A ( self , A__ , ):
return SparkExamplesIterable(self.df )
| 456 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowercase = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 452 | import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=8 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=16 , __lowercase=5 , __lowercase=2 , __lowercase=36 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> List[str]:
__UpperCamelCase :Union[str, Any] = parent
__UpperCamelCase :str = batch_size
__UpperCamelCase :Union[str, Any] = seq_length
__UpperCamelCase :Optional[Any] = is_training
__UpperCamelCase :Union[str, Any] = use_input_mask
__UpperCamelCase :Any = use_token_type_ids
__UpperCamelCase :List[str] = use_labels
__UpperCamelCase :Tuple = vocab_size
__UpperCamelCase :Tuple = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :Tuple = num_attention_heads
__UpperCamelCase :Any = intermediate_size
__UpperCamelCase :Optional[Any] = hidden_act
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :str = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :int = type_vocab_size
__UpperCamelCase :Optional[int] = type_sequence_label_size
__UpperCamelCase :Any = initializer_range
__UpperCamelCase :List[str] = num_labels
__UpperCamelCase :Dict = num_choices
__UpperCamelCase :Union[str, Any] = scope
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :List[Any] = None
if self.use_input_mask:
__UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Union[str, Any] = None
if self.use_token_type_ids:
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCamelCase :List[str] = None
__UpperCamelCase :Tuple = None
__UpperCamelCase :Union[str, Any] = None
if self.use_labels:
__UpperCamelCase :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = self.get_config()
__UpperCamelCase :List[str] = 300
return config
def UpperCamelCase__ ( self) -> int:
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :Tuple = self.prepare_config_and_inputs()
__UpperCamelCase :Union[str, Any] = True
__UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :int = MraModel(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :str = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase)
__UpperCamelCase :Tuple = model(__lowercase , token_type_ids=__lowercase)
__UpperCamelCase :str = model(__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[Any]:
__UpperCamelCase :Tuple = True
__UpperCamelCase :Dict = MraModel(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :int = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__UpperCamelCase :Any = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , )
__UpperCamelCase :str = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Dict = MraForMaskedLM(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :Tuple = MraForQuestionAnswering(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Any = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :List[Any] = self.num_labels
__UpperCamelCase :str = MraForSequenceClassification(__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Dict = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[int]:
__UpperCamelCase :str = self.num_labels
__UpperCamelCase :Optional[int] = MraForTokenClassification(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Optional[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :Optional[Any] = self.num_choices
__UpperCamelCase :Dict = MraForMultipleChoice(config=__lowercase)
model.to(__lowercase)
model.eval()
__UpperCamelCase :Tuple = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Union[str, Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :str = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :str = config_and_inputs
__UpperCamelCase :List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : List[str] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a__ : Optional[int] = False
a__ : Optional[int] = False
a__ : str = False
a__ : Optional[Any] = False
a__ : Union[str, Any] = ()
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :str = MraModelTester(self)
__UpperCamelCase :Tuple = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase :Optional[int] = type
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :List[Any] = MraModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
@unittest.skip(reason='''MRA does not output attentions''')
def UpperCamelCase__ ( self) -> Any:
return
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = MraModel.from_pretrained('''uw-madison/mra-base-512-4''')
__UpperCamelCase :Union[str, Any] = torch.arange(256).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :List[Any] = model(__lowercase)[0]
__UpperCamelCase :Dict = torch.Size((1, 256, 768))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Optional[Any] = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :str = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''')
__UpperCamelCase :str = torch.arange(256).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :Optional[Any] = model(__lowercase)[0]
__UpperCamelCase :List[str] = 50_265
__UpperCamelCase :List[Any] = torch.Size((1, 256, vocab_size))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Optional[Any] = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''')
__UpperCamelCase :Optional[int] = torch.arange(4_096).unsqueeze(0)
with torch.no_grad():
__UpperCamelCase :Tuple = model(__lowercase)[0]
__UpperCamelCase :Optional[int] = 50_265
__UpperCamelCase :Optional[Any] = torch.Size((1, 4_096, vocab_size))
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :List[str] = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4))
| 452 | 1 |
def A ( _lowercase = 1_000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 248 | def A ( _lowercase = 10**9 ):
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE : List[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 248 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ = ['pixel_values']
def __init__( self : Optional[Any] , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = True , **__lowerCamelCase : Dict , ):
super().__init__(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase , param_name="crop_size" )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE = do_convert_rgb
def _snake_case ( self : Optional[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ):
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase )
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ):
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ):
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Dict , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ):
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="size" , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="crop_size" , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) | 714 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neo"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_layers
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = window_size
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_dropout
SCREAMING_SNAKE_CASE = embed_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = attention_types
SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@staticmethod
def _snake_case ( __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ):
import torch
SCREAMING_SNAKE_CASE = input.size()
SCREAMING_SNAKE_CASE = len(A__ )
SCREAMING_SNAKE_CASE = shape[dimension]
SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ )
SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE = [slice(A__ )] * rank
SCREAMING_SNAKE_CASE = indices
SCREAMING_SNAKE_CASE = input[s]
SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
def __a ( A__ : Union[str, Any] , A__ : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE = torch.arange(1 , A__ )
SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ )
SCREAMING_SNAKE_CASE = remainders == 0
SCREAMING_SNAKE_CASE = candidates[divisor_indices]
SCREAMING_SNAKE_CASE = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self : Optional[int] ):
return self._config.num_heads
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self : Optional[int] ):
return 13 | 698 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = SwinvaConfig()
__SCREAMING_SNAKE_CASE : Optional[Any] = swinva_name.split('''_''' )
__SCREAMING_SNAKE_CASE : Dict = name_split[1]
if "to" in name_split[3]:
__SCREAMING_SNAKE_CASE : int = int(name_split[3][-3:] )
else:
__SCREAMING_SNAKE_CASE : str = int(name_split[3] )
if "to" in name_split[2]:
__SCREAMING_SNAKE_CASE : Any = int(name_split[2][-2:] )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = int(name_split[2][6:] )
if model_size == "tiny":
__SCREAMING_SNAKE_CASE : int = 96
__SCREAMING_SNAKE_CASE : int = (2, 2, 6, 2)
__SCREAMING_SNAKE_CASE : Tuple = (3, 6, 12, 24)
elif model_size == "small":
__SCREAMING_SNAKE_CASE : Any = 96
__SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Optional[int] = (3, 6, 12, 24)
elif model_size == "base":
__SCREAMING_SNAKE_CASE : Optional[Any] = 128
__SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Tuple = (4, 8, 16, 32)
else:
__SCREAMING_SNAKE_CASE : Any = 192
__SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Optional[int] = (6, 12, 24, 48)
if "to" in swinva_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__SCREAMING_SNAKE_CASE : int = 21_841
__SCREAMING_SNAKE_CASE : List[Any] = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE : List[str] = '''imagenet-22k-id2label.json'''
__SCREAMING_SNAKE_CASE : int = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in idalabel.items()}
else:
__SCREAMING_SNAKE_CASE : Tuple = 1_000
__SCREAMING_SNAKE_CASE : str = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''imagenet-1k-id2label.json'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : str = {int(snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : int = idalabel
__SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : int = img_size
__SCREAMING_SNAKE_CASE : Tuple = num_classes
__SCREAMING_SNAKE_CASE : str = embed_dim
__SCREAMING_SNAKE_CASE : Optional[int] = depths
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : Dict = window_size
return config
def a__ ( snake_case ):
"""simple docstring"""
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE : int = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
__SCREAMING_SNAKE_CASE : str = '''encoder.''' + name
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE : int = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE : Any = '''layernorm.weight'''
if name == "norm.bias":
__SCREAMING_SNAKE_CASE : int = '''layernorm.bias'''
if "head" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''head''' , '''classifier''' )
else:
__SCREAMING_SNAKE_CASE : List[str] = '''swinv2.''' + name
return name
def a__ ( snake_case , snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : str = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
__SCREAMING_SNAKE_CASE : Dict = key.split('''.''' )
__SCREAMING_SNAKE_CASE : List[Any] = int(key_split[1] )
__SCREAMING_SNAKE_CASE : Optional[int] = int(key_split[3] )
__SCREAMING_SNAKE_CASE : Optional[int] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim, :]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : List[str] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : str = val
return orig_state_dict
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = get_swinva_config(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification(snake_case )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
__SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(snake_case , stream=snake_case ).raw )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=snake_case , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = timm_model(inputs['''pixel_values'''] )
__SCREAMING_SNAKE_CASE : Dict = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case )
model.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 74 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowercase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
__lowercase = {
'''camembert-base''': 512,
}
__lowercase = '''▁'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = VOCAB_FILES_NAMES
a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=["<s>NOTUSED", "</s>NOTUSED"] , __lowercase = None , **__lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase :Tuple = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token
__UpperCamelCase :List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
__UpperCamelCase :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__lowercase))
__UpperCamelCase :Dict = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__UpperCamelCase :Tuple = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
__UpperCamelCase :int = len(self.fairseq_tokens_to_ids)
__UpperCamelCase :List[Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
__UpperCamelCase :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase :Tuple = [self.cls_token_id]
__UpperCamelCase :Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is None:
return [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1, 1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :List[Any] = [self.sep_token_id]
__UpperCamelCase :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCamelCase__ ( self) -> int:
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Tuple = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
return self.sp_model.encode(__lowercase , out_type=__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__lowercase) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
__UpperCamelCase :Any = []
__UpperCamelCase :List[str] = ''''''
__UpperCamelCase :Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase) + token
__UpperCamelCase :Optional[Any] = True
__UpperCamelCase :List[str] = []
else:
current_sub_tokens.append(__lowercase)
__UpperCamelCase :int = False
out_string += self.sp_model.decode(__lowercase)
return out_string.strip()
def __getstate__( self) -> Dict:
__UpperCamelCase :Dict = self.__dict__.copy()
__UpperCamelCase :Any = None
return state
def __setstate__( self , __lowercase) -> Optional[Any]:
__UpperCamelCase :List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__UpperCamelCase :List[str] = {}
__UpperCamelCase :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
if not os.path.isdir(__lowercase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__UpperCamelCase :Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowercase)
elif not os.path.isfile(self.vocab_file):
with open(__lowercase , '''wb''') as fi:
__UpperCamelCase :int = self.sp_model.serialized_model_proto()
fi.write(__lowercase)
return (out_vocab_file,)
| 167 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A ( __UpperCAmelCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = 8
# DPR tok
lowerCAmelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''dpr_tokenizer''' )
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
lowerCAmelCase_ = os.path.join(UpperCamelCase__, DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase_ = dict(zip(UpperCamelCase__, range(len(UpperCamelCase__ ) ) ) )
lowerCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase_ = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''bart_tokenizer''' )
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
lowerCAmelCase_ = os.path.join(UpperCamelCase__, BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase_ = os.path.join(UpperCamelCase__, BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_dataset()
lowerCAmelCase_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase_ = dataset
lowerCAmelCase_ = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
return retriever
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_dataset()
lowerCAmelCase_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''custom''', )
if from_disk:
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''dataset''' )
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname, '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset''' ) )
del dataset
lowerCAmelCase_ = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
else:
lowerCAmelCase_ = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, UpperCamelCase__ ), )
return retriever
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb''' ) )
lowerCAmelCase_ = os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''' )
lowerCAmelCase_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(UpperCamelCase__, open(UpperCamelCase__, '''wb''' ) )
lowerCAmelCase_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''legacy''', index_path=self.tmpdirname, )
lowerCAmelCase_ = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), UpperCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase_ = self.get_dummy_dataset()
retriever.save_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), UpperCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), UpperCamelCase__ )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_legacy_index_retriever()
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=UpperCamelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ), UpperCamelCase__ )
self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = RagRetriever.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever.retrieve(UpperCamelCase__, n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase_ = [[5, 7], [10, 11]]
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever(UpperCamelCase__, UpperCamelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__, np.ndarray )
lowerCAmelCase_ = retriever(
UpperCamelCase__, UpperCamelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCamelCase__, return_tensors='''pt''', )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase_ = 1
lowerCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
retriever.set_ctx_encoder_tokenizer(UpperCamelCase__ )
lowerCAmelCase_ = [[5, 7], [10, 11]]
lowerCAmelCase_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCAmelCase_ = retriever(UpperCamelCase__, UpperCamelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCamelCase__ )
self.assertEqual(
len(UpperCamelCase__ ), 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ), UpperCamelCase__ ) # check for doc token related keys in dictionary.
| 701 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = '''▁'''
_A = {'''vocab_file''': '''spiece.model'''}
_A = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
_A = {
'''google/pegasus-xsum''': 512,
}
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = VOCAB_FILES_NAMES
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
def __init__( self, UpperCamelCase__, UpperCamelCase__="<pad>", UpperCamelCase__="</s>", UpperCamelCase__="<unk>", UpperCamelCase__="<mask_2>", UpperCamelCase__="<mask_1>", UpperCamelCase__=None, UpperCamelCase__=103, UpperCamelCase__ = None, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError(
f"additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is"
f" {type(UpperCamelCase__ )}" )
lowerCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(UpperCamelCase__ ), self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowerCAmelCase_ = additional_special_tokens_extended
else:
lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset )]
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__, unk_token=UpperCamelCase__, mask_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token_sent=UpperCamelCase__, offset=UpperCamelCase__, additional_special_tokens=UpperCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **UpperCamelCase__, )
lowerCAmelCase_ = mask_token_sent
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# add special tokens to encoder dict
lowerCAmelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCAmelCase_ = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase_ = self.sp_model.piece_to_id(UpperCamelCase__ )
return sp_id + self.offset
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = []
lowerCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
lowerCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False ):
"""simple docstring"""
return 1
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
UpperCamelCase__, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__, '''wb''' ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 325 | 0 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCAmelCase ( self , snake_case , snake_case ) -> Any:
"""simple docstring"""
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowercase_ ) for s in shape] )}.npy'''
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self , snake_case=0 , snake_case=(4, 4, 6_4, 6_4) , snake_case=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Tuple = jnp.bfloataa if fpaa else jnp.floataa
lowercase : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) , dtype=lowercase_ )
return image
def _UpperCAmelCase ( self , snake_case=False , snake_case="CompVis/stable-diffusion-v1-4" ) -> Optional[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
lowercase : Optional[int] = 'bf16' if fpaa else None
lowercase : Tuple = FlaxUNetaDConditionModel.from_pretrained(
lowercase_ , subfolder="""unet""" , dtype=lowercase_ , revision=lowercase_ )
return model, params
def _UpperCAmelCase ( self , snake_case=0 , snake_case=(4, 7_7, 7_6_8) , snake_case=False ) -> List[Any]:
"""simple docstring"""
lowercase : int = jnp.bfloataa if fpaa else jnp.floataa
lowercase : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) , dtype=lowercase_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=lowercase_ )
lowercase : Optional[Any] = self.get_latents(lowercase_ , fpaa=lowercase_ )
lowercase : Any = self.get_encoder_hidden_states(lowercase_ , fpaa=lowercase_ )
lowercase : Optional[Any] = model.apply(
{"""params""": params} , lowercase_ , jnp.array(lowercase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowercase_ , ).sample
assert sample.shape == latents.shape
lowercase : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase : Any = jnp.array(lowercase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowercase_ , lowercase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case ) -> Dict:
"""simple docstring"""
lowercase : Union[str, Any] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=lowercase_ )
lowercase : Any = self.get_latents(lowercase_ , shape=(4, 4, 9_6, 9_6) , fpaa=lowercase_ )
lowercase : Optional[Any] = self.get_encoder_hidden_states(lowercase_ , shape=(4, 7_7, 1_0_2_4) , fpaa=lowercase_ )
lowercase : str = model.apply(
{"""params""": params} , lowercase_ , jnp.array(lowercase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowercase_ , ).sample
assert sample.shape == latents.shape
lowercase : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase : str = jnp.array(lowercase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase_ , lowercase_ , atol=1E-2 )
| 607 |
from collections.abc import Iterable
from typing import Any
class A :
def __init__( self : Dict , lowercase_ : int | None = None ) -> int:
"""simple docstring"""
_lowerCamelCase : List[Any] =value
_lowerCamelCase : Node | None =None # Added in order to delete a node easier
_lowerCamelCase : Node | None =None
_lowerCamelCase : Node | None =None
def __repr__( self : Dict ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class A :
def __init__( self : Union[str, Any] , lowercase_ : Node | None = None ) -> int:
"""simple docstring"""
_lowerCamelCase : Optional[int] =root
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return str(self.root )
def lowerCamelCase ( self : Optional[Any] , lowercase_ : Node , lowercase_ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
_lowerCamelCase : Optional[int] =node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_ ): # If it is the right children
_lowerCamelCase : int =new_children
else:
_lowerCamelCase : Dict =new_children
else:
_lowerCamelCase : Tuple =new_children
def lowerCamelCase ( self : Optional[Any] , lowercase_ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase ( self : int ) -> bool:
"""simple docstring"""
return self.root is None
def lowerCamelCase ( self : List[Any] , lowercase_ : Union[str, Any] ) -> None:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =Node(lowercase_ ) # create a new Node
if self.empty(): # if Tree is empty
_lowerCamelCase : Union[str, Any] =new_node # set its root
else: # Tree is not empty
_lowerCamelCase : Optional[int] =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_lowerCamelCase : Optional[int] =new_node # We insert the new node in a leaf
break
else:
_lowerCamelCase : Optional[Any] =parent_node.left
else:
if parent_node.right is None:
_lowerCamelCase : Optional[Any] =new_node
break
else:
_lowerCamelCase : Optional[int] =parent_node.right
_lowerCamelCase : Optional[Any] =parent_node
def lowerCamelCase ( self : Any , *lowercase_ : Union[str, Any] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(lowercase_ )
def lowerCamelCase ( self : Optional[int] , lowercase_ : List[Any] ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError('Warning: Tree is empty! please use another.' )
else:
_lowerCamelCase : int =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_lowerCamelCase : Dict =node.left if value < node.value else node.right
return node
def lowerCamelCase ( self : Tuple , lowercase_ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
_lowerCamelCase : Union[str, Any] =self.root
if not self.empty():
while node.right is not None:
_lowerCamelCase : Optional[int] =node.right
return node
def lowerCamelCase ( self : Optional[Any] , lowercase_ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
_lowerCamelCase : Union[str, Any] =self.root
if self.root is None:
return None
if not self.empty():
_lowerCamelCase : Optional[int] =self.root
while node.left is not None:
_lowerCamelCase : List[Any] =node.left
return node
def lowerCamelCase ( self : Tuple , lowercase_ : int ) -> None:
"""simple docstring"""
_lowerCamelCase : List[str] =self.search(lowercase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ , lowercase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ , node.left )
else:
_lowerCamelCase : List[str] =self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_lowerCamelCase : Union[str, Any] =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase ( self : str , lowercase_ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase ( self : str , lowercase_ : Dict=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase ( self : Union[str, Any] , lowercase_ : list , lowercase_ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(lowercase_ , node.left )
arr.append(node.value )
self.inorder(lowercase_ , node.right )
def lowerCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Node ) -> int:
"""simple docstring"""
_lowerCamelCase : list[int] =[]
self.inorder(lowercase_ , lowercase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def a_ ( SCREAMING_SNAKE_CASE__ : Node | None ):
'''simple docstring'''
_lowerCamelCase : int =[]
if curr_node is not None:
_lowerCamelCase : List[Any] =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def a_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =(8, 3, 6, 1, 10, 14, 13, 4, 7)
_lowerCamelCase : int =BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE__ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE__ )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 464 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowercase : Optional[Any] = trt.Logger(trt.Logger.WARNING)
__lowercase : Union[str, Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowercase : Optional[Any] = logging.getLogger(__name__)
__lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=3_8_4,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=1_2_8,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=2_0,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=3_0,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=4_2, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
__lowercase : Any = parser.parse_args()
if args.tokenizer_name:
__lowercase : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
__lowercase : int = args.per_device_eval_batch_size
__lowercase : Optional[int] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__lowercase : Optional[Any] = True
__lowercase : Dict = """temp_engine/bert-fp32.engine"""
if args.fpaa:
__lowercase : Optional[Any] = """temp_engine/bert-fp16.engine"""
if args.inta:
__lowercase : Union[str, Any] = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
__lowercase : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowercase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
__lowercase : Union[str, Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowercase : List[str] = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowercase : str = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowercase : List[str] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def lowerCamelCase_ ( _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
lowerCamelCase_ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowerCamelCase_ = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowerCamelCase_ = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase )
# start time
lowerCamelCase_ = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowerCamelCase ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCamelCase_ = time.time()
lowerCamelCase_ = end_time - start_time
lowerCamelCase_ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowercase : Union[str, Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase : List[str] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__lowercase : Union[str, Any] = raw_datasets["""validation"""].column_names
__lowercase : List[str] = """question""" if """question""" in column_names else column_names[0]
__lowercase : Dict = """context""" if """context""" in column_names else column_names[1]
__lowercase : Tuple = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__lowercase : Dict = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
__lowercase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase_ ( _lowerCamelCase : Tuple ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
lowerCamelCase_ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCamelCase_ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCamelCase_ = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCamelCase_ = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCamelCase_ = tokenized_examples.sequence_ids(_lowerCamelCase )
lowerCamelCase_ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCamelCase_ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCamelCase_ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
__lowercase : int = raw_datasets["""validation"""]
# Validation Feature Creation
__lowercase : int = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
__lowercase : Any = default_data_collator
__lowercase : int = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
__lowercase : Dict = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase_ ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCamelCase_ = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCamelCase_ = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
lowerCamelCase_ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
lowerCamelCase_ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
__lowercase : int = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase_ ( _lowerCamelCase : Any ):
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
__lowercase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__lowercase : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__lowercase : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__lowercase : int = cuda.mem_alloc(h_outputa.nbytes)
__lowercase : Any = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowercase : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
__lowercase : List[Any] = 0.0
__lowercase : Dict = 0
__lowercase : List[Any] = timeit.default_timer()
__lowercase : str = None
for step, batch in enumerate(eval_dataloader):
__lowercase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__lowercase : int = outputs
__lowercase : Optional[Any] = torch.tensor(start_logits)
__lowercase : Dict = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__lowercase : str = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__lowercase : List[str] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__lowercase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowercase : Union[str, Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__lowercase : Union[str, Any] = nested_truncate(all_preds, len(eval_dataset))
__lowercase : List[str] = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0))
logger.info("""Total Number of Inference = %d""", niter)
__lowercase : Any = post_processing_function(eval_examples, eval_dataset, all_preds)
__lowercase : Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''') | 706 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowercase : List[str] = ["""bert-base-uncased""", """bert-base-cased"""]
__lowercase : Tuple = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowerCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = tokenizer
lowerCamelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = TFAutoModel.from_config(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(UpperCamelCase__ )
lowerCamelCase_ = self.bert(**UpperCamelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = [
BertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCamelCase_ = [TFBertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(UpperCamelCase__ , use_fast_bert_tokenizer=UpperCamelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase_ = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCamelCase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tokenizer(UpperCamelCase__ , return_tensors='''tf''' , padding='''longest''' )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf_tokenizer(self.paired_sentences )
lowerCamelCase_ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.function(UpperCamelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tf.constant(UpperCamelCase__ )
lowerCamelCase_ = compiled_tokenizer(UpperCamelCase__ )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = ModelToSave(tokenizer=UpperCamelCase__ )
lowerCamelCase_ = tf.convert_to_tensor(self.test_sentences )
lowerCamelCase_ = model(UpperCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ = Path(UpperCamelCase__ ) / '''saved.model'''
model.save(UpperCamelCase__ )
lowerCamelCase_ = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase_ = loaded_model(UpperCamelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 ) | 66 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( _lowercase, _lowercase, unittest.TestCase ):
__magic_name__ : str = StableDiffusionSAGPipeline
__magic_name__ : List[Any] = TEXT_TO_IMAGE_PARAMS
__magic_name__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ : Any = False
def lowercase__ (self : int ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
SCREAMING_SNAKE_CASE : Dict = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__UpperCAmelCase, set_alpha_to_one=__UpperCAmelCase, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ (self : Any, __UpperCAmelCase : List[Any], __UpperCAmelCase : Union[str, Any]=0 ) -> str:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ (self : Tuple ) -> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowercase__ (self : str ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
SCREAMING_SNAKE_CASE : List[str] = sag_pipe.to(__UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = '''.'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sag_pipe(
[prompt], generator=__UpperCAmelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase__ (self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = sag_pipe.to(__UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = '''.'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = sag_pipe(
[prompt], generator=__UpperCAmelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''' )
SCREAMING_SNAKE_CASE : Any = output.images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE : int = sag_pipe.to(__UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = '''.'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = sag_pipe(
[prompt], width=768, height=512, generator=__UpperCAmelCase, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''', )
SCREAMING_SNAKE_CASE : str = output.images
assert image.shape == (1, 512, 768, 3)
| 507 |
"""Scrape basic product data for an Amazon India search term into a DataFrame."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Collect title, link, price, rating, MRP and discount for each search result."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out inconsistent rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
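
# A minimal usage sketch (illustrative only; depends on Amazon's current HTML
# structure and on network access, so results may be empty if the page layout
# changes):
#
#     df = get_amazon_product_data("laptop")
#     print(df.head())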
| 507 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        """Describe the metric's expected inputs and point to the CUAD codebase."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Convert the inputs to the CUAD format and run the official scorer."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 709 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 105 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
A__ : Tuple =DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''', snake_case_ ) is not None:
return prompt_or_repo_id
A__ : Optional[Any] =cached_file(
snake_case_, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name} )
with open(snake_case_, '''r''', encoding='''utf-8''' ) as f:
return f.read()
| 416 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for EnCodec: pads and optionally chunks raw audio into model-ready arrays."""

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
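
# A minimal usage sketch (illustrative only): pad one second of random mono
# audio with the extractor defined above. The chunking parameters are
# arbitrary example values, not checkpoint defaults.
if __name__ == "__main__":
    example_extractor = EncodecFeatureExtractor(chunk_length_s=1.0, overlap=0.5)
    example_audio = np.random.randn(24_000).astype(np.float32)
    example_features = example_extractor(example_audio, sampling_rate=24_000, return_tensors="np")
    print(example_features["input_values"].shape)  # (batch, channels, padded_length)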
| 416 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 721 |
"""Greedy fractional knapsack: pick items by descending profit/weight ratio."""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and while i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
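

# A quick worked example (hypothetical numbers, not part of the original
# module): the profit/weight ratios are 5.0, ~6.67 and 6.0, so the greedy
# order takes all of item 1 (+20) and then 4 of the 5 kg of item 2
# (+4/5 * 30 = 24), for a total gain of 44.0.
assert calc_profit([10, 20, 30], [2, 3, 5], 7) == 44.0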
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    calc_profit(profit, weight, max_weight)
| 227 | 0
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration class for Perceiver models (text, image, optical flow and multimodal)."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
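
# A minimal usage sketch (illustrative only): instantiate the config with two
# overridden architecture hyper-parameters and inspect the result.
if __name__ == "__main__":
    example_config = PerceiverConfig(num_latents=128, d_latents=512)
    print(example_config.model_type, example_config.num_latents, example_config.d_latents)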
| 105 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the values below are hardcoded (they override the signature
        # defaults), mirroring the structure of the original tester.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # the model accepts both dict and list input formats
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)

            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 698 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bartpho"""] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 |
'''simple docstring'''
from typing import Any
def lowercase (input_list: list ) -> list:
    """simple docstring"""
    if not input_list:
        return []
    result: list = [input_list.count(value ) for value in input_list]
    y: int = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
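    # Added illustrative check (not in the original module): every value tied
    # for the highest count is returned, in sorted order.
    assert lowercase([2, 4, 3, 2, 4] ) == [2, 4]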
| 630 | 1 |
from __future__ import annotations
import numpy as np
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return np.maximum(0 , lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
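    # Added illustration (not in the original module): np.maximum broadcasts,
    # so the same helper handles 2-D input unchanged.
    print(relu([[-2.0, 1.5], [0.0, -0.5]]))  # --> [[0.  1.5] [0.  0. ]]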
| 80 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
    '''simple docstring'''
    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
    '''simple docstring'''
    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def snake_case_ ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def snake_case_ ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest( TestCase ):
'''simple docstring'''
@require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
@require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 672 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class snake_case_ (Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self ,truncation=None ,tokenize_kwargs=None ,return_tensors=None ,**kwargs):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["""truncation"""] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["""return_tensors"""] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self ,inputs ,**tokenize_kwargs):
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs ,return_tensors=return_tensors ,**tokenize_kwargs)
        return model_inputs
    def _forward( self ,model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self ,model_outputs ,return_tensors=False):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self ,*args ,**kwargs):
        """simple docstring"""
        return super().__call__(*args ,**kwargs)
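# Added usage sketch (illustrative; the checkpoint name is a common public
# model, not something this module mandates):
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test")
#     print(len(features[0]))  # one hidden-state vector per token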
| 710 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput (BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler (SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self ,num_train_timesteps = 2000 ,snr = 0.15 ,sigma_min = 0.01 ,sigma_max = 1348.0 ,sampling_eps = 1E-5 ,correct_steps = 1 ,):
        """simple docstring"""
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps ,sigma_min ,sigma_max ,sampling_eps)
    def scale_model_input( self ,sample ,timestep = None):
        """simple docstring"""
        return sample
    def set_timesteps( self ,num_inference_steps ,sampling_eps = None ,device = None):
        """simple docstring"""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 ,sampling_eps ,num_inference_steps ,device=device)
    def set_sigmas( self ,num_inference_steps ,sigma_min = None ,sigma_max = None ,sampling_eps = None):
        """simple docstring"""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps ,sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min) ,math.log(sigma_max) ,num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma( self ,timesteps ,t):
        """simple docstring"""
        return torch.where(
            timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
    def step_pred( self ,model_output ,timestep ,sample ,generator = None ,return_dict = True ,):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps ,timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape ,layout=sample.layout ,generator=generator ,device=sample.device ,dtype=sample.dtype)
        prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample ,prev_sample_mean=prev_sample_mean)
    def step_correct( self ,model_output ,sample ,generator = None ,return_dict = True ,):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape ,layout=sample.layout ,generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise( self ,original_samples ,noise ,timesteps ,):
        """simple docstring"""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self):
"""simple docstring"""
return self.config.num_train_timesteps
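# Added usage sketch (illustrative; `score_model` stands in for a hypothetical
# score network and is not defined in this module):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=100)
#     scheduler.set_sigmas(num_inference_steps=100)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
#         sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample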
| 455 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester( unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )
    def test_exact_match_arg( self ):
        '''simple docstring'''
        result = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_arg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        result = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
| 139 |
"""simple docstring"""
import base64
def baseaa_encode ( string : str ) -> bytes:
    '''simple docstring'''
    return base64.b64encode(string.encode("""utf-8""" ) )
def baseaa_decode ( encoded : bytes ) -> str:
    '''simple docstring'''
    return base64.b64decode(encoded ).decode("""utf-8""" )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = 'Hello World!'
UpperCAmelCase : Tuple = baseaa_encode(test)
print(encoded)
UpperCAmelCase : Any = baseaa_decode(encoded)
print(decoded)
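    # Added round-trip check (illustrative): decoding the encoded bytes
    # recovers the original string.
    assert decoded == test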
| 139 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCAmelCase: int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca ( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb ( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict ( dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
    words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
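# Added usage sketch (illustrative): a fairseq-style `dict.txt` with lines like
# "hello 42" maps the four specials to ids 0-3 and each word to 4, 5, ...
#
#     with open("dict.txt", "w") as f:
#         f.write("hello 42\nworld 7\n")
#     assert create_vocab_dict("dict.txt")["hello"] == 4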
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase: Dict = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10_224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
UpperCAmelCase: Any = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 710 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
"""simple docstring"""
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextaTextGenerationPipeline ( Pipeline ):
"""simple docstring"""
    return_name = "generated"
    def __init__( self ,*args ,**kwargs ):
        super().__init__(*args ,**kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self ,return_tensors=None ,return_type=None ,clean_up_tokenization_spaces=None ,truncation=None ,stop_sequence=None ,**generate_kwargs ,):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence ,add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self ,input_length ,min_length ,max_length ):
        return True
    def _parse_and_tokenize( self ,*args ,truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] ,list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] ,str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args ,padding=padding ,truncation=truncation ,return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self ,*args ,**kwargs ):
        result = super().__call__(*args ,**kwargs )
        if (
            isinstance(args[0] ,list )
            and all(isinstance(el ,str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self ,inputs ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,**kwargs ):
        inputs = self._parse_and_tokenize(inputs ,truncation=truncation ,**kwargs )
        return inputs
    def _forward( self ,model_inputs ,**generate_kwargs ):
        if self.framework == "pt":
            in_b , input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b , input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" ,self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
        self.check_inputs(input_length ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs ,**generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b ,out_b // in_b ,*output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self ,model_outputs ,return_type=ReturnType.TEXT ,clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids ,skip_special_tokens=True ,clean_up_tokenization_spaces=clean_up_tokenization_spaces ,)
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline ( TextaTextGenerationPipeline ):
"""simple docstring"""
    return_name = "summary"
    def __call__( self ,*args ,**kwargs ):
        return super().__call__(*args ,**kwargs )
    def check_inputs( self ,input_length ,min_length ,max_length ):
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline ( TextaTextGenerationPipeline ):
"""simple docstring"""
    return_name = "translation"
    def check_inputs( self ,input_length ,min_length ,max_length ):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                """increasing your max_length manually, e.g. translator('...', max_length=400)""" )
        return True
    def preprocess( self ,*args ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,src_lang=None ,tgt_lang=None ):
        if getattr(self.tokenizer ,"""_build_translation_inputs""" ,None ):
            return self.tokenizer._build_translation_inputs(
                *args ,return_tensors=self.framework ,truncation=truncation ,src_lang=src_lang ,tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args ,truncation=truncation )
    def _sanitize_parameters( self ,src_lang=None ,tgt_lang=None ,**kwargs ):
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" ,self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self ,*args ,**kwargs ):
        return super().__call__(*args ,**kwargs )
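# Added usage sketch (illustrative; the checkpoint names are common public
# models, not mandated by this module):
#
#     from transformers import pipeline
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     print(summarizer("A long article ...", max_length=56)[0]["summary_text"])
#
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     print(translator("How old are you?")[0]["translation_text"])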
| 600 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read ( bpayload , sampling_rate ):
    ar = f'''{sampling_rate}'''
    ac = """1"""
    format_for_conversion = """f32le"""
    ffmpeg_command = [
        """ffmpeg""",
        """-i""",
        """pipe:0""",
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""" )
    return audio
def ffmpeg_microphone ( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    ar = f'''{sampling_rate}'''
    ac = """1"""
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    system = platform.system()
    if system == "Linux":
        format_ = """alsa"""
        input_ = """default"""
    elif system == "Darwin":
        format_ = """avfoundation"""
        input_ = """:0"""
    elif system == "Windows":
        format_ = """dshow"""
        input_ = """default"""
    ffmpeg_command = [
        """ffmpeg""",
        """-f""",
        format_,
        """-i""",
        input_,
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-fflags""",
        """nobuffer""",
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live ( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["""raw"""] = np.frombuffer(item["""raw"""] , dtype=dtype )
        item["""stride"""] = (
            item["""stride"""][0] // size_of_sample,
            item["""stride"""][1] // size_of_sample,
        )
        item["""sampling_rate"""] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter ( iterator , chunk_len , stride , stream = False ):
    acc = b""""""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"""raw""": acc[:chunk_len], """stride""": stride}
                if stream:
                    item["""partial"""] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"""raw""": acc, """stride""": (_stride_left, 0)}
        if stream:
            item["""partial"""] = False
        yield item
def _ffmpeg_stream ( ffmpeg_command , buflen ):
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
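# Added illustration (not part of the original module): with chunk_len=4 and
# stride=(0, 2), chunk_bytes_iter yields chunks that overlap by two bytes.
if __name__ == "__main__":
    for item in chunk_bytes_iter(iter([b"abcdefgh"] ) , 4 , stride=(0, 2) ):
        print(item )
    # {'raw': b'abcd', 'stride': (0, 2)}
    # {'raw': b'cdef', 'stride': (0, 2)}
    # {'raw': b'efgh', 'stride': (0, 2)}
    # {'raw': b'gh', 'stride': (0, 0)}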
| 325 |
def solution ( n : int = 1_00 ) -> int:
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
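    # Added cross-check (illustrative): the same value follows from the closed
    # forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
    n = 1_00
    closed_form = (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6
    assert solution(n) == closed_form == 25_164_150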
| 1 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs ( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) -> int:
    '''simple docstring'''
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    # load pretrained model
    model = load_gpta("gpt2" ).to(device )
    print("computing perplexity on objective set" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("perplexity on objective set:" , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner ( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) -> Tuple:
    '''simple docstring'''
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune ( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) -> Optional[Any]:
    '''simple docstring'''
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("Test perplexity, step" , global_step , ":" , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step() # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("Test perplexity, step" , global_step , ":" , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main ( ) -> Union[str, Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain data files for WikiText." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--data_file" , type=str , default=None , help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) , )
    parser.add_argument(
        "--igf_data_file" , type=str , default=None , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the final fine-tuned model is stored." , )
    parser.add_argument(
        "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
    parser.add_argument("--seed" , type=int , default=None , help="A seed for reproducible training." )
    parser.add_argument(
        "--context_len" , default=32 , type=int , help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) , )
    parser.add_argument(
        "--size_objective_set" , default=100 , type=int , help="number of articles that are long enough to be used as our objective set" , )
    parser.add_argument(
        "--eval_freq" , default=100 , type=int , help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps" , default=1000 , type=int , help="To calculate training epochs" )
    parser.add_argument(
        "--secondary_learner_batch_size" , default=128 , type=int , help="batch size of training data for secondary learner" , )
    parser.add_argument(
        "--batch_size" , default=16 , type=int , help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval" , default=10 , type=int , help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ) , )
    parser.add_argument(
        "--number" , default=100 , type=int , help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len" , default=1026 , type=int , help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs" , default=15 , type=int , help="number of epochs to train secondary learner" )
    parser.add_argument("--trim" , default=True , type=bool , help="truncate the example if it exceeds context length" )
    parser.add_argument(
        "--threshold" , default=1.0 , type=float , help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) , )
    parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=str , help="finetuned_model_name" )
    parser.add_argument(
        "--recopy_model" , default=recopy_gpta , type=str , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 701 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase_ (PipelineTool ):
A__ : str = '''Salesforce/blip-image-captioning-base'''
A__ : Optional[int] = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
A__ : str = '''image_captioner'''
A__ : Tuple = AutoModelForVisionaSeq
A__ : Tuple = ['''image''']
A__ : List[Any] = ['''text''']
    def __init__( self , *args , **kwargs ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image.Image" ) ->Union[str, Any]:
        '''simple docstring'''
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward( self , inputs ) ->List[Any]:
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ) ->Tuple:
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
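# Added usage sketch (illustrative; requires Pillow and downloads the BLIP
# checkpoint on first use; `lowercase_` is the obfuscated class name above):
#
#     from PIL import Image
#     tool = lowercase_()
#     print(tool(Image.open("photo.jpg")))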
| 612 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Dict = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__UpperCamelCase : Union[str, Any] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class lowercase__ ( PreTrainedTokenizerFast ):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def __A ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self : Any , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
SCREAMING_SNAKE_CASE : int = value
def __A ( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
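

# Minimal usage sketch (illustrative, not part of the original module; assumes the
# public "roberta-base" checkpoint is reachable).
if __name__ == "__main__":
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    # <s> Hello Ġworld </s> — exact ids depend on the vocab version
    print(tokenizer("Hello world")["input_ids"])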
| 248 |

import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # An XLNet sequence has the format `X <sep> <cls>` (single) or `A <sep> B <sep> <cls>` (pair).
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
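

# Minimal usage sketch (illustrative, not part of the original module; assumes the
# public "xlnet-base-cased" checkpoint is reachable).
if __name__ == "__main__":
    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tok.build_inputs_with_special_tokens(tok.encode("Hello world", add_special_tokens=False))
    # per the format above, the sequence ends with <sep> <cls>
    print(ids[-2:], (tok.sep_token_id, tok.cls_token_id))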
| 248 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_A: str = """Create a default config file for Accelerate with only a few flags set."""
def _lowerCAmelCase ( _lowerCAmelCase="no" , _lowerCAmelCase = default_json_config_file , _lowerCAmelCase = False )-> List[Any]:
__UpperCAmelCase = Path(_lowerCAmelCase )
path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
__UpperCAmelCase = num_gpus
__UpperCAmelCase = False
if num_gpus > 1:
__UpperCAmelCase = 'MULTI_GPU'
else:
__UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
__UpperCAmelCase = torch.xpu.device_count()
__UpperCAmelCase = num_xpus
__UpperCAmelCase = False
if num_xpus > 1:
__UpperCAmelCase = 'MULTI_XPU'
else:
__UpperCAmelCase = 'NO'
elif is_npu_available():
__UpperCAmelCase = torch.npu.device_count()
__UpperCAmelCase = num_npus
__UpperCAmelCase = False
if num_npus > 1:
__UpperCAmelCase = 'MULTI_NPU'
else:
__UpperCAmelCase = 'NO'
else:
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = 1
__UpperCAmelCase = 'NO'
__UpperCAmelCase = ClusterConfig(**_lowerCAmelCase )
config.to_json_file(_lowerCAmelCase )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
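

# Minimal CLI wiring sketch (illustrative; the parent parser and the config path below
# are assumptions for this demo, not part of the original module).
if __name__ == "__main__":
    import argparse

    root = argparse.ArgumentParser(prog="accelerate")
    sub = root.add_subparsers()
    default_command_parser(sub, parents=[])
    demo_args = root.parse_args(["default", "--config_file", "/tmp/hypothetical_accelerate_config.json"])
    demo_args.func(demo_args)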
| 617 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
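

# Minimal usage sketch (illustrative; the overridden values are arbitrary and the
# config is constructed locally, so nothing is downloaded).
if __name__ == "__main__":
    config = RoCBertConfig(vocab_size=21_128, enable_shape=False)
    print(config.model_type, config.hidden_size, config.enable_shape)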
| 617 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 92 |

def solution(n: int = 600_851_475_143) -> int:
    # Returns the largest prime factor of n (Project Euler problem 3).
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''') | 544 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
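

# Minimal usage sketch (illustrative; a tiny two-stage config built locally, no download).
if __name__ == "__main__":
    config = DinatConfig(
        embed_dim=32, depths=[2, 2], num_heads=[2, 4], dilations=[[1, 2], [1, 2]], out_features=["stage1"]
    )
    print(config.hidden_size, config.stage_names)  # 64, ['stem', 'stage1', 'stage2']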
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 190 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # Swin2SR-style preprocessing: rescale pixel values and pad height/width up to a multiple of `pad_size`.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
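

# Minimal usage sketch (illustrative): pad a 100x125 channels-first image up to multiples of 8.
if __name__ == "__main__":
    dummy = (np.random.rand(3, 100, 125) * 255).astype(np.float32)
    features = Swin2SRImageProcessor()(dummy, return_tensors=None)
    print(features["pixel_values"][0].shape)  # (3, 104, 128): height/width padded to the next multiple of 8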
| 81 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    # Linear-ramp alpha mask used to blend neighboring tiles; borders listed in
    # `remove_borders` ("l"/"r"/"t"/"b") stay fully opaque because they sit on the image edge.
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 429 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
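

# Minimal usage sketch (illustrative; "resnet50" is a stand-in timm model name).
if __name__ == "__main__":
    config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
    print(config.backbone, config.out_indices)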
| 713 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    # Sieve of Eratosthenes: returns all primes less than or equal to num.
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 555 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    # APX approximation of minimum vertex cover via a maximal matching.
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 432 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    # Escape-time iteration for c = x + iy; returns the normalized step count.
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 432 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 336 | 1 |
def perfect_cube(n: int) -> bool:
    # Floating-point cube-root check; exact only where ** (1 / 3) is numerically precise.
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 332 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings over non-padding positions, then project.
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
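

# Minimal forward-pass sketch (illustrative; uses a tiny randomly initialized config
# rather than a released M-CLIP checkpoint, so it runs offline on CPU).
if __name__ == "__main__":
    cfg = MCLIPConfig(
        transformerDimSize=32,
        imageDimSize=16,
        vocab_size=100,
        hidden_size=32,
        num_hidden_layers=1,
        num_attention_heads=2,
        intermediate_size=64,
    )
    model = MultilingualCLIP(cfg)
    ids = torch.ones((1, 4), dtype=torch.long)
    mask = torch.ones((1, 4), dtype=torch.long)
    pooled, token_embs = model(ids, mask)
    print(pooled.shape, token_embs.shape)  # torch.Size([1, 16]) torch.Size([1, 4, 32])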
| 332 | 1 |
def excel_title_to_column(column_title: str) -> int:
    # Converts an Excel-style column title (e.g. "AB") to its column number, base-26.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 709 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each group of 3 binary digits maps to a single octal digit.
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
    testmod()
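
# Quick sanity check of the conversion: "1111" is left-padded to "001111",
# whose 3-bit groups "001" and "111" map to the octal digits "1" and "7"
# (0b1111 == 15 == 0o17).
assert bin_to_octal("1111") == "17"
assert bin_to_octal("111") == "7"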
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
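
# Minimal usage sketch, kept as comments since instantiating the tool downloads
# the BLIP checkpoint on first use; "photo.jpg" is a hypothetical local file:
#
#     from PIL import Image
#
#     tool = ImageCaptioningTool()
#     print(tool(Image.open("photo.jpg")))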
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
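
# Minimal usage sketch: build the automaton once, then scan a text in a single
# pass; the result maps each keyword to the indices where its matches start.
matches = Automaton(["he", "she", "his", "hers"]).search_in("ahishers")
assert matches == {"his": [1], "she": [3], "he": [4], "hers": [4]}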
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # Saving a 4-bit model was not supported at this point, so this should raise.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        output_sequences = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model

        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
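
# Sanity check: for the graph above the call returns ['c', 'd', 'e', 'b', 'a'],
# a reverse topological order; reversing it yields a, b, e, d, c, which places
# every vertex before the vertices it points to.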
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""

    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
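
# Example invocation (assuming the file is saved as get_github_job_time.py;
# the run id below is hypothetical and network access to the GitHub API is
# required):
#
#     python get_github_job_time.py --workflow_run_id 1234567890
#
# which prints one "<job name>: <duration in minutes>" line per job,
# longest-running first.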
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
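
# Example: each base is swapped for its complement (A<->T, C<->G), so
# dna("GCTA") == "CGAT".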
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 707 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over the 36-character alphabet A-Z plus 0-9."""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modulo {req_l} of encryption key ({det}) "
                f"is not coprime w.r.t. {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
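

# A minimal round-trip sketch (the key below is illustrative; any integer
# matrix whose determinant is coprime with 36 works). Note that decryption
# recovers the processed plaintext — uppercased, filtered, and padded —
# rather than the raw input string.
def _hill_cipher_round_trip_demo() -> None:
    key = numpy.array([[2, 5], [1, 6]])  # det = 7, which is coprime with 36
    cipher = HillCipher(key)
    plaintext = "TESTING HILL CIPHER"
    ciphertext = cipher.encrypt(plaintext)
    assert cipher.decrypt(ciphertext) == cipher.process_text(plaintext)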
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 601 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
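

# A small sanity sketch of the renaming rules above. The tensor name is
# hypothetical and the value is a placeholder — only the key rewrite matters:
# "backbone" -> "segformer.encoder" and "patch_embed1" -> "patch_embeddings.0".
def _rename_keys_demo() -> None:
    dummy = OrderedDict({"backbone.patch_embed1.proj.weight": torch.zeros(1)})
    renamed = rename_keys(dummy)
    assert "segformer.encoder.patch_embeddings.0.proj.weight" in renamed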
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
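

# Shape sketch for the split above (numbers are illustrative): with
# hidden_sizes[i] = 64, the fused kv weight has shape (128, 64); rows [:64]
# become the key projection and rows [64:] the value projection.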
def prepare_img():
    # a COCO image of two cats, commonly used for verification
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 105 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in `value_array`, find the closest vector in `dataset`
    by Euclidean distance and return pairs of [nearest_vector, distance]."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatypes... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
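

# A small worked example: for each query vector, similarity_search returns the
# closest dataset row and its Euclidean distance to the query.
def _similarity_search_demo() -> None:
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 0.1]])
    [[vector, dist]] = similarity_search(dataset, value_array)
    assert vector == [0.0, 0.0]  # nearest neighbour of (0, 0.1)
    assert math.isclose(dist, 0.1)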
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
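

# A hedged CLI sketch, using the example repo id from the --checkpoint-repo
# help text below (the script name and output path are placeholders):
#   python <this script> \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted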
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 555 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        return completion
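

# A hedged usage sketch of truncate_before_pattern (the checkpoint id comes
# from the vocab map above; the regexes are illustrative): decoding is cut at
# the first match of any supplied pattern, e.g. at a second top-level `def`.
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(token_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])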
| 555 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
        """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 4 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 351 | 0 |
from collections import deque
def tarjan(g):
    """Tarjan's algorithm: return the strongly connected components of the
    directed graph `g`, given as an adjacency list indexed by vertex."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build an adjacency list for `n` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 488 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Build a circuit with the given register sizes, measure qubit 0, and
    return the measurement counts from 1000 shots on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
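

# The returned counts map bitstrings to frequencies. Since no gates are
# applied before measurement, the qubit stays in |0>, so the expected result
# on the ideal simulator is {"0": 1000}.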
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 488 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 38 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`, projection layer 0 maps
            # to linear1 and any later layer maps to linear2.
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
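

# Shape sketch for the qkv split in rename_state_dict (sizes illustrative):
# a fused attention projection with 3*d rows is cut row-wise into equal
# query/key/value blocks of d rows each, mirroring the slicing above.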
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 642 | 0 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 5, 25, 76)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
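

# Worked examples: 5**2 = 25 ends in 5 and 25**2 = 625 ends in 25, so both are
# automorphic, while 7**2 = 49 does not end in 7.
assert is_automorphic_number(5) and is_automorphic_number(25)
assert not is_automorphic_number(7)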
if __name__ == "__main__":
import doctest
doctest.testmod()
| 427 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
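

# A hedged usage sketch (the scheduler class and paths are placeholders — any
# concrete scheduler built on SchedulerMixin and ConfigMixin applies):
#   scheduler = SomeScheduler.from_pretrained("repo/checkpoint", subfolder="scheduler")
#   scheduler.save_pretrained("./scheduler_dir")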
| 427 | 1 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
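
# Typical use (hypothetical): a test class sets `framework = "pytorch"` and
# requests this fixture via @pytest.mark.usefixtures("sm_env"), after which
# self.env exposes the role, hyperparameters, metric definitions and image URI.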
| 66 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
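
# RVL-CDIP has 16 document classes (letter, invoice, memo, ...), which is why
# the logits for a single image come out with shape (1, 16).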
| 66 | 1 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value`` (or its derivative when ``deriv`` is True)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the output approaches ``expected`` (in percent).

    >>> res = forward_propagation(32, 450_000)  # doctest: +SKIP
    >>> 31 < res < 33  # doctest: +SKIP
    True
    """
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
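
# Each iteration is one gradient-descent-style step: the miss
# (expected / 100 - layer_1) is scaled by the sigmoid derivative to form the
# delta, and the lone weight moves by INITIAL_VALUE * delta, so the output
# converges toward the target (stochastically, given the random start weight).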
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 718 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings to decode
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
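    # Hypothetical round-trip check: deciphering an enciphered message with
    # the same map must return the uppercased plaintext.
    _demo_map = create_cipher_map("Goodbye!!")
    assert decipher(encipher("sanity check", _demo_map), _demo_map) == "SANITY CHECK"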
import doctest
doctest.testmod()
main()
| 446 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
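
# The _LazyModule indirection defers the heavy torch / TF imports until an
# attribute such as ConvNextModel is actually accessed.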
| 102 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of
    primes along both diagonals first falls below ``ratio``."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
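    # With the default 10% ratio this computes the Project Euler 58 answer
    # (26241); the run is slow because is_prime is plain trial division.
    print(f"{solution() = }")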
| 471 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that cannot be written as the sum of
    a prime and twice a square (Project Euler 46)."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f"{solution() = }")
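    # For reference, the published Project Euler 46 answer is 5777: the
    # smallest odd composite with no prime + 2 * square decomposition.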
| 711 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert the scaling: map network outputs back to the feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
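
# Minimal usage sketch (hypothetical; assumes a pretrained checkpoint and that
# note_tokens comes from the pipeline's MIDI feature converter):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   audio = pipe(note_tokens, num_inference_steps=100).audios[0]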
| 165 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image_processing(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
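
# Note: ImageGPTImageProcessor quantizes each pixel of the downscaled 32x32
# image to the index of its nearest color cluster, so input_ids holds palette
# indices rather than raw pixel values, hence a LongTensor of length 32 * 32.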
| 19 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 19 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 705 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
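
# Minimal usage sketch (mirrors the docstring above; downloads a checkpoint):
#
#   bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
#   results = bleurt.compute(predictions=["hello there"], references=["hello there"])
#   results["scores"]  # one float per prediction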
| 571 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 234 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE with the modified Euler (Heun) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
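    # Hypothetical check against y' = -2 * x * y with y(0) = 1, whose exact
    # solution is exp(-x**2); the last value should be close to exp(-0.09).
    ys = euler_modified(lambda x, y: -2 * x * y, 1.0, 0.0, 0.1, 0.3)
    print(ys[-1])  # roughly 0.9139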
| 490 | 0 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''') | 596 |
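    # Caveat: this relies on the third-party downloadgram.net endpoint and its
    # JSON response shape; it is not an official Instagram API.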
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 596 | 1 |
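    # Hypothetical round trip: decrypting an encryption with the same key
    # must recover the plaintext.
    assert decrypt(encrypt("WEAREDISCOVEREDFLEEATONCE", 3), 3) == "WEAREDISCOVEREDFLEEATONCE"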
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
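
# Minimal usage sketch (assumes Hub access for the "gpt2" checkpoint):
#
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   tok("Hello world")["input_ids"]  # e.g. [15496, 995]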
| 692 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index of ``s`` at which ``pattern`` starts."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
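    # This is the brute-force O(len(s) * len(pattern)) scan; note that
    # overlapping occurrences are all reported:
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]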
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
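
# Usage sketch: the factory above decorates the benchmarked closures defined
# later in this file, e.g.
#
#   @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
#   def encoder_forward():
#       return model(input_ids, training=False)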
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
def _A ( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(__lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase :List[Any] = timeit.repeat(
__lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(__lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def _A ( self : Dict , __lowerCamelCase : Callable[[], None] ):
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
UpperCamelCase :int = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
UpperCamelCase :Optional[Any] = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase :int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase :int = nvml.nvmlDeviceGetMemoryInfo(__lowerCamelCase )
UpperCamelCase :List[str] = meminfo.used
UpperCamelCase :Optional[int] = Memory(__lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
UpperCamelCase :Optional[int] = None
else:
UpperCamelCase :Any = measure_peak_memory_cpu(__lowerCamelCase )
UpperCamelCase :str = Memory(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase :Optional[int] = stop_memory_tracing(__lowerCamelCase )
if memory is None:
UpperCamelCase :List[Any] = summary.total
else:
UpperCamelCase :Dict = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 703 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
snake_case__ : Optional[datasets.Features] = None
snake_case__ : str = "utf-8"
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
snake_case__ : bool = True # deprecated
snake_case__ : Optional[int] = None # deprecated
snake_case__ : int = 1_0 << 2_0 # 10MB
snake_case__ : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
snake_case__ : Optional[Any] = JsonConfig
def _A ( self : Any ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
UpperCamelCase :int = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def _A ( self : List[Any] , __lowerCamelCase : Optional[int] ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCamelCase :Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
UpperCamelCase :Optional[int] = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :Optional[Any] = [files]
UpperCamelCase :int = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCamelCase :Tuple = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = [files]
UpperCamelCase :List[str] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def _A ( self : List[Any] , __lowerCamelCase : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase :Any = self.config.features.arrow_schema.field(__lowerCamelCase ).type
UpperCamelCase :List[Any] = pa_table.append_column(__lowerCamelCase , pa.array([None] * len(__lowerCamelCase ) , type=__lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase :int = table_cast(__lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _A ( self : List[str] , __lowerCamelCase : Optional[Any] ):
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Optional[Any] = json.load(__lowerCamelCase )
# We keep only the field we are interested in
UpperCamelCase :int = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(__lowerCamelCase , (list, tuple) ):
UpperCamelCase :Dict = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :int = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
else:
UpperCamelCase :Optional[Any] = dataset
UpperCamelCase :int = pa.Table.from_pydict(__lowerCamelCase )
yield file_idx, self._cast_table(__lowerCamelCase )
# If the file has one json object per line
else:
with open(__lowerCamelCase , """rb""" ) as f:
UpperCamelCase :List[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase :str = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase :int = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
UpperCamelCase :Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase :List[str] = batch.decode(self.config.encoding , errors=__lowerCamelCase ).encode("""utf-8""" )
try:
while True:
try:
UpperCamelCase :str = paj.read_json(
io.BytesIO(__lowerCamelCase ) , read_options=paj.ReadOptions(block_size=__lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCamelCase )
or block_size > len(__lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Dict = json.load(__lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ): # list is the only sequence type supported in JSON
try:
UpperCamelCase :Any = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :Dict = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
UpperCamelCase :Optional[int] = pa.Table.from_pydict(__lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__lowerCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
batch_idx += 1
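# --- Illustrative sketch (an addition): the adaptive block_size retry used above,
# applied to a standalone newline-delimited JSON payload. Doubling block_size on
# ArrowInvalid mirrors how the loader handles rows that straddle a block boundary.
import io
import pyarrow as pa
import pyarrow.json as paj

def read_json_with_growing_block_size(data: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(data), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(data):
                raise  # growing the block further cannot help
            block_size *= 2  # retry with a larger block

table = read_json_with_growing_block_size(b'{"a": 1}\n{"a": 2}\n')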
| 590 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ : List[str] = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
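# --- Illustrative sketch (an addition): the same lazy-import effect without
# transformers' internal _LazyModule, using a PEP 562 module-level __getattr__.
# Assumes this code lives in a package's __init__.py so that the relative import
# ".configuration_rag" resolves.
import importlib

_LAZY_ATTRS = {"RagConfig": ".configuration_rag"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")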
| 692 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__a : Dict , **__a : List[Any] ):
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
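# --- Illustrative sketch (an addition): the deprecation-shim pattern above,
# reduced to its essentials. NewImageProcessor is a stand-in name for whatever
# class replaces the deprecated one.
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)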
| 692 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=1_3 , __UpperCamelCase : str=7 , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[int]=9_9 , __UpperCamelCase : Any=3_2 , __UpperCamelCase : Union[str, Any]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Union[str, Any]=3_7 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : List[Any]=5_1_2 , __UpperCamelCase : str=1_6 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : int=False , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Any=3 , __UpperCamelCase : Any=4 , __UpperCamelCase : int=None , )->Optional[int]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any )->Dict:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase__ ( self : List[str] , __UpperCamelCase : Dict )->Union[str, Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] )->Tuple:
_UpperCAmelCase = DebertaVaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCAmelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] )->Tuple:
_UpperCAmelCase = DebertaVaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] )->str:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : int , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str )->Dict:
_UpperCAmelCase = DebertaVaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = DebertaVaForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
        ) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : str )->str:
_UpperCAmelCase = DebertaVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : Any )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : Tuple )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : str )->str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase )
def lowercase__ ( self : Optional[int] )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__UpperCamelCase )
@slow
def lowercase__ ( self : Union[str, Any] )->Union[str, Any]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DebertaVaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def lowercase__ ( self : int )->Any:
pass
@slow
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
_UpperCAmelCase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
# compare the actual values for a slice.
_UpperCAmelCase = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
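# --- Illustrative sketch (an addition): what the ids_tensor helper used by the
# tester above produces -- a batch of random token ids in [0, vocab_size).
# The sizes match the tester's defaults and are otherwise arbitrary.
import torch

def random_ids(batch_size: int, seq_length: int, vocab_size: int) -> torch.Tensor:
    return torch.randint(low=0, high=vocab_size, size=(batch_size, seq_length), dtype=torch.long)

ids = random_ids(13, 7, 99)  # shape (13, 7), values in [0, 99)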
| 95 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _a :
"""simple docstring"""
@staticmethod
def lowercase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] )->List[str]:
pass
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Dict = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str )->Tuple:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model=__UpperCamelCase , tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
_UpperCAmelCase = '''What is the placebo?'''
_UpperCAmelCase = [
{
'''image''': load_image(__UpperCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] )->Any:
_UpperCAmelCase = dqa_pipeline(__UpperCamelCase , top_k=2 )
self.assertEqual(
__UpperCamelCase , [
[
{'''score''': ANY(__UpperCamelCase ), '''answer''': ANY(__UpperCamelCase ), '''start''': ANY(__UpperCamelCase ), '''end''': ANY(__UpperCamelCase )},
{'''score''': ANY(__UpperCamelCase ), '''answer''': ANY(__UpperCamelCase ), '''start''': ANY(__UpperCamelCase ), '''end''': ANY(__UpperCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : str )->Union[str, Any]:
_UpperCAmelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''How many cats are there?'''
_UpperCAmelCase = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 3_8, '''end''': 3_9},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 3_8, '''end''': 4_0},
]
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , __UpperCamelCase )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , __UpperCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail,
        # most likely returning an empty answer.
_UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(__UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
_UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , words=__UpperCamelCase , boxes=__UpperCamelCase , top_k=2 )
self.assertEqual(__UpperCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : Union[str, Any] )->Optional[Any]:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : int )->Optional[int]:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=5_0 , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
]
]
* 2 , )
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : List[str] )->Any:
_UpperCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , max_seq_len=5_0 , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
@slow
@require_torch
def lowercase__ ( self : int )->str:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowercase__ ( self : Tuple )->Union[str, Any]:
pass
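# --- Illustrative sketch (an addition): invoking the document-question-answering
# pipeline exercised by the tests above. OCR via pytesseract is required when
# only an image is supplied; the checkpoint and URL are the ones used in the tests.
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
answers = dqa(
    image="https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png",
    question="What is the invoice number?",
    top_k=2,
)
# Each answer is a dict with "score", "answer", "start" and "end" keys.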
| 95 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : int , __magic_name__ : Optional[Any]=7 , __magic_name__ : Tuple=3 , __magic_name__ : Tuple=18 , __magic_name__ : Dict=30 , __magic_name__ : Any=400 , __magic_name__ : List[str]=True , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Tuple=True , ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 20}
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_flip_channel_order
def __A ( self : int ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = MobileViTImageProcessor if is_vision_available() else None
def __A ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = MobileViTImageProcessingTester(self )
@property
def __A ( self : Tuple ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "do_flip_channel_order" ) )
def __A ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __A ( self : Any ) -> Any:
pass
def __A ( self : Dict ) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : List[str] ) -> Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Optional[int] ) -> Union[str, Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
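# --- Illustrative sketch (an addition): building random PIL inputs the way
# prepare_image_inputs does for the tests above. Resolutions here are arbitrary
# values within the tester's min/max bounds.
import numpy as np
from PIL import Image

def random_pil_images(batch_size: int, height: int, width: int) -> list:
    arrays = np.random.randint(0, 256, size=(batch_size, height, width, 3), dtype=np.uint8)
    return [Image.fromarray(a) for a in arrays]

images = random_pil_images(7, 30, 400)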
| 140 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A : Dict = threading.Lock()
A : Optional[logging.Handler] = None
A : Optional[int] = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A : List[str] = logging.WARNING
A : str = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_VERBOSITY" , __UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def a__ ( ):
return __name__.split("." )[0]
def a__ ( ):
return logging.getLogger(_get_library_name() )
def a__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE_ = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE_ = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE_ = None
def a__ ( ):
return log_levels
def a__ ( __UpperCamelCase = None ):
if name is None:
SCREAMING_SNAKE_CASE_ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__UpperCamelCase )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE_ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(__UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , __UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Dict = warning_advice
@functools.lru_cache(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Union[str, Any] = warning_once
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> int: # pylint: disable=unused-argument
SCREAMING_SNAKE_CASE_ = args[0] if args else None
def __iter__( self : str ) -> Optional[int]:
return iter(self._iterator )
def __getattr__( self : Dict , __magic_name__ : int ) -> Optional[int]:
def empty_fn(*__magic_name__ : Tuple , **__magic_name__ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Dict:
return self
def __exit__( self : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] ) -> List[str]:
return
class lowerCamelCase :
"""simple docstring"""
def __call__( self : List[Any] , *__magic_name__ : List[str] , **__magic_name__ : Tuple ) -> Union[str, Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*__magic_name__ , **__magic_name__ )
else:
return EmptyTqdm(*__magic_name__ , **__magic_name__ )
def __A ( self : Tuple , *__magic_name__ : List[Any] , **__magic_name__ : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__magic_name__ , **__magic_name__ )
def __A ( self : Optional[Any] ) -> Dict:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A : Optional[int] = _tqdm_cls()
def a__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = False
hf_hub_utils.disable_progress_bars()
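# --- Illustrative sketch (an addition): the lru_cache-based "warn once" trick
# patched onto the logger above, in standalone form. A repeated message hits
# the cache and is emitted only once.
import functools
import logging

_logger = logging.getLogger(__name__)

@functools.lru_cache(None)
def warn_once(message: str) -> None:
    _logger.warning(message)

warn_once("deprecated code path")  # logged
warn_once("deprecated code path")  # suppressed by the cache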
| 140 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :str=3 ,_UpperCamelCase :Optional[int]=1_8 ,_UpperCamelCase :Any=3_0 ,_UpperCamelCase :Optional[Any]=4_0_0 ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :Tuple=True ,):
snake_case_ : Dict = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : int = parent
snake_case_ : str = batch_size
snake_case_ : Any = num_channels
snake_case_ : List[str] = image_size
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[int] = do_resize
snake_case_ : int = size
snake_case_ : List[Any] = apply_ocr
def a__ ( self :List[Any] ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self :Dict ):
snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self :List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self :Union[str, Any] ):
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) )
self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) )
def a__ ( self :Optional[Any] ):
snake_case_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 )
self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} )
def a__ ( self :Dict ):
pass
def a__ ( self :Any ):
# Initialize image_processing
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase ,Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,_UpperCamelCase )
self.assertIsInstance(encoding.boxes ,_UpperCamelCase )
# Test batched
snake_case_ : Tuple = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def a__ ( self :Optional[Any] ):
# Initialize image_processing
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase ,np.ndarray )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : List[str] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def a__ ( self :List[str] ):
# Initialize image_processing
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase ,torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : Optional[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def a__ ( self :List[str] ):
# with apply_OCR = True
snake_case_ : List[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case_ : Optional[int] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
snake_case_ : Optional[Any] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case_ : Dict = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_UpperCamelCase )
self.assertListEqual(encoding.boxes ,_UpperCamelCase )
# with apply_OCR = False
snake_case_ : str = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )
snake_case_ : str = image_processing(_UpperCamelCase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) | 703 |
'''Project Euler 107 style solution: maximal saving from replacing a network with its minimum spanning tree.'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph whose edges are stored with the smaller vertex first."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add an edge, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Return a minimum spanning tree of the graph, grown from its smallest vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
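
# Minimal sanity check for the Graph API above (my addition, not part of the
# original solution): a triangle whose spanning tree keeps the two cheap edges.
def _demo_prims() -> None:
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    assert sum(demo.prims_algorithm().edges.values()) == 3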
def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximal saving from replacing the given network with its minimum spanning tree."""
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix: list[list[str]] = [line.split(",") for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }') | 267 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = '''▁'''
__UpperCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__UpperCamelCase : Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__UpperCamelCase : str = {'''vinai/bartpho-syllable''': 1024}
class a ( UpperCamelCase_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self , _snake_case , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = monolingual_vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase = {}
lowerCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase = cnt
cnt += 1
with open(A_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
lowerCAmelCase = line.strip().split()[0]
lowerCAmelCase = len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase = len(self.fairseq_tokens_to_ids )
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''''''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 4 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : str , A_ : Union[str, Any] , A_ : List[Any]=3 , A_ : Union[str, Any]=3_2 , A_ : List[Any]=3 , A_ : Any=1_0 , A_ : List[Any]=[8, 1_6, 3_2, 6_4] , A_ : List[Any]=[1, 1, 2, 1] , A_ : Tuple=True , A_ : Dict=True , A_ : int="relu" , A_ : Optional[int]=3 , A_ : List[Any]=None , A_ : Optional[int]=["stage2", "stage3", "stage4"] , A_ : str=[2, 3, 4] , A_ : int=1 , ):
lowerCAmelCase_ : int = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : str = embeddings_size
lowerCAmelCase_ : Optional[Any] = hidden_sizes
lowerCAmelCase_ : Any = depths
lowerCAmelCase_ : int = is_training
lowerCAmelCase_ : Any = use_labels
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : str = num_labels
lowerCAmelCase_ : Tuple = scope
lowerCAmelCase_ : Union[str, Any] = len(A_)
lowerCAmelCase_ : Tuple = out_features
lowerCAmelCase_ : str = out_indices
lowerCAmelCase_ : Optional[Any] = num_groups
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels)
lowerCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : str):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCAmelCase__ ( self : Any , A_ : Optional[int] , A_ : Any , A_ : Optional[Any]):
lowerCAmelCase_ : Any = BitModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Tuple = model(A_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase__ ( self : List[str] , A_ : List[Any] , A_ : List[str] , A_ : Any):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Optional[int] = BitForImageClassification(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[int] = model(A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self : int , A_ : int , A_ : str , A_ : Dict):
lowerCAmelCase_ : Any = BitBackbone(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : List[str] = model(A_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Any = BitBackbone(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[Any] = model(A_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = config_and_inputs
lowerCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ,unittest.TestCase ):
_a = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_a = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : Tuple = BitModelTester(self)
lowerCAmelCase_ : str = ConfigTester(self , config_class=A_ , has_text_modality=A_)
def UpperCAmelCase__ ( self : Optional[Any]):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : Tuple):
return
@unittest.skip(reason='''Bit does not output attentions''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''')
def UpperCAmelCase__ ( self : Any):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''')
def UpperCAmelCase__ ( self : Dict):
pass
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] = model_class(A_)
lowerCAmelCase_ : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_)
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=A_)
for name, module in model.named_modules():
if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def UpperCAmelCase__ ( self : Tuple):
def check_hidden_states_output(A_ : Dict , A_ : List[Any] , A_ : Optional[int]):
lowerCAmelCase_ : List[str] = model_class(A_)
model.to(A_)
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(**self._prepare_for_class(A_ , A_))
lowerCAmelCase_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(A_) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : int = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : Optional[Any] = layer_type
lowerCAmelCase_ : str = True
check_hidden_states_output(A_ , A_ , A_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Optional[int] = True
check_hidden_states_output(A_ , A_ , A_)
@unittest.skip(reason='''Bit does not use feedforward chunking''')
def UpperCAmelCase__ ( self : Any):
pass
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_)
@slow
def UpperCAmelCase__ ( self : Optional[int]):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Dict = BitModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def UpperCamelCase( ):
lowerCAmelCase_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any]):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(A_)
lowerCAmelCase_ : Optional[Any] = self.default_image_processor
lowerCAmelCase_ : List[Any] = prepare_img()
lowerCAmelCase_ : Optional[Any] = image_processor(images=A_ , return_tensors='''pt''').to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : int = model(**A_)
# verify the logits
lowerCAmelCase_ : str = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , A_)
lowerCAmelCase_ : Any = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(A_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4))
@require_torch
class __snake_case ( UpperCamelCase_ ,unittest.TestCase ):
_a = (BitBackbone,) if is_torch_available() else ()
_a = BitConfig
_a = False
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Optional[int] = BitModelTester(self)
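
# Inference sketch (my addition): running the model these tests exercise,
# assuming BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to "google/bit-50".
#
#   processor = BitImageProcessor.from_pretrained("google/bit-50")
#   model = BitForImageClassification.from_pretrained("google/bit-50")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits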
| 171 | 0 |
'''Encode and decode messages using International Morse code.'''

# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate ``message`` into Morse code, one space between symbols."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a space-separated Morse-code ``message`` back into text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
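# Round-trip property (informal, my addition): decrypt(encrypt(s)) == s.upper()
# for any string whose characters all appear in MORSE_CODE_DICT.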
| 721 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
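
# Usage sketch (my addition), mirroring the Examples block in the metric
# description above:
#
#   chrf = datasets.load_metric("chrf")
#   results = chrf.compute(predictions=["hello there"], references=[["hello there"]])
#   # results["score"] is on a 0-100 scale and is 100.0 for an exact match.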
| 646 | 0 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = graph
self._normalize_graph(A_ , A_ )
UpperCamelCase = len(A_ )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ , A_ )-> Tuple:
'''simple docstring'''
if sources is int:
UpperCamelCase = [sources]
if sinks is int:
UpperCamelCase = [sinks]
if len(A_ ) == 0 or len(A_ ) == 0:
return
UpperCamelCase = sources[0]
UpperCamelCase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(A_ ) > 1 or len(A_ ) > 1:
UpperCamelCase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCamelCase = max_input_flow
UpperCamelCase = 0
UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCamelCase = max_input_flow
UpperCamelCase = size - 1
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = algorithm(self )
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = flow_network
UpperCamelCase = flow_network.verticesCount
UpperCamelCase = flow_network.sourceIndex
UpperCamelCase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
UpperCamelCase = flow_network.graph
UpperCamelCase = False
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
if not self.executed:
self._algorithm()
UpperCamelCase = True
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
pass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> List[str]:
'''simple docstring'''
super().__init__(A_ )
# use this to save your result
UpperCamelCase = -1
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCamelCase = [0] * self.verticies_count
UpperCamelCase = [0] * self.verticies_count
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCamelCase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCamelCase = 0
while i < len(A_ ):
UpperCamelCase = vertices_list[i]
UpperCamelCase = self.heights[vertex_index]
self.process_vertex(A_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A_ ) )
UpperCamelCase = 0
else:
i += 1
UpperCamelCase = sum(self.preflow[self.source_index] )
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A_ , A_ )
self.relabel(A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> str:
'''simple docstring'''
UpperCamelCase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCamelCase = self.heights[to_index]
if min_height is not None:
UpperCamelCase = min_height + 1
if __name__ == "__main__":
lowerCAmelCase : int = [0]
lowerCAmelCase : Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCAmelCase : List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCAmelCase : List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCAmelCase : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 3 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "OwlViTImageProcessor"
lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
SCREAMING_SNAKE_CASE__ = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = input_ids
SCREAMING_SNAKE_CASE__ = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
SCREAMING_SNAKE_CASE__ = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
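
    # Usage sketch (my addition): the processor is normally driven with text
    # queries plus images, as in the upstream OwlViT examples.
    #
    #   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    #   inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")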
def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process(*__A , **__A )
def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*__A , **__A )
def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*__A , **__A )
def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
return self.image_processor | 6 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
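
# Usage sketch (my addition): instantiating the config and a randomly
# initialised model from it, following the usual transformers pattern.
#
#   from transformers import TimesformerModel
#   config = TimesformerConfig()
#   model = TimesformerModel(config)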
| 216 |
'''Simulate random walks over a Markov chain defined by per-edge transition probabilities.'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Stores the chain as a mapping from each node to its outgoing transition probabilities."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node_a: str, node_b: str, probability: float) -> None:
        if node_a not in self.connections:
            self.add_node(node_a)
        if node_b not in self.connections:
            self.add_node(node_b)
        self.connections[node_a][node_b] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Walk ``steps`` transitions starting from ``start`` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a, node_b, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
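
# Example (my addition): a two-state chain; with these symmetric probabilities
# a 5000-step walk should visit "a" and "b" roughly equally often.
#
#   transitions = [("a", "b", 0.9), ("a", "a", 0.1), ("b", "a", 0.9), ("b", "b", 0.1)]
#   counts = get_transitions("a", transitions, 5000)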
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[str]=5_6 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : List[str]=3_2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : List[Any]="gelu_new" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Tuple=5_1_2 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : List[str]="block_sparse" , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=3 , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
lowerCAmelCase = rescale_embeddings
lowerCAmelCase = attention_type
lowerCAmelCase = use_bias
lowerCAmelCase = block_size
lowerCAmelCase = num_random_blocks
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : int = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : int ) -> str:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Dict ) -> Dict:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Optional[int] ) -> int:
super().test_hidden_states_output()
@slow
def __UpperCAmelCase ( self : str ) -> Dict:
for model_class_name in self.all_model_classes:
lowerCAmelCase = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : str ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[int] ):
return model(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=1E-5 , UpperCAmelCase__ : Union[str, Any]="outputs" , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
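
# Loading sketch (my addition): the checkpoint exercised by the slow test
# above can be used directly.
#
#   model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")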
| 133 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__snake_case ="""\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__snake_case ="""\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__snake_case ="""
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute ( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 133 | 1 |
def is_power_of_two ( number : int ):
    """Return True if ``number`` is a power of two, using the bit trick
    ``n & (n - 1) == 0`` (a power of two has exactly one set bit).

    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(16)
    True
    >>> is_power_of_two(6)
    False
    >>> is_power_of_two(-1)
    Traceback (most recent call last):
        ...
    ValueError: number must not be negative
    """
    if number < 0:
        raise ValueError("""number must not be negative""" )
    return number & (number - 1) == 0
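# Worked example (comment added): 8 = 0b1000 and 7 = 0b0111, so 8 & 7 == 0 (power of two);
# 6 = 0b110 and 5 = 0b101, so 6 & 5 == 0b100 != 0 (not a power of two).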
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline ( self , model , tokenizer , processor ):
        '''simple docstring'''
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["""politics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test ( self , classifier , examples ):
        '''simple docstring'''
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]} )

        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]} )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]} )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )

        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]} )

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(1 )
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(2 )
            ] , )

        with self.assertRaises(ValueError ):
            classifier("""""" , candidate_labels="""politics""" )

        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="""politics""" )

        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )

        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None )

        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )

        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )

        self.run_entailment_id(classifier )
    def run_entailment_id ( self , zero_shot_classifier ):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_labelaid = config.labelaid
        original_entailment = zero_shot_classifier.entailment_id

        config.labelaid = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        config.labelaid = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.labelaid = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.labelaid = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        config.labelaid = original_labelaid
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
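    # Note (added): entailment_id looks for a label whose name starts with "entail"
    # (case-insensitive) in the model's label mapping and falls back to -1 when none is
    # found, which is exactly what the four label-mapping variants above assert.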
@require_torch
    def test_truncation ( self ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_00 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt ( self ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf ( self ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt ( self ):
'''simple docstring'''
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=a__ , )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf ( self ):
'''simple docstring'''
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=a__ , )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 291 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution ( target : int = 2_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
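    # Derivation of the estimate used below (comment added for clarity): an a x b grid
    # contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2. Fixing T(a) and
    # solving T(b) = target / T(a), i.e. b * (b + 1) / 2 = target / T(a), for b gives
    #     b = (-1 + sqrt(1 + 8 * target / T(a))) / 2.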
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
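# A minimal brute-force cross-check (added; not part of the original solution): for small
# targets, scanning all a x b grids directly should agree with the estimate-based search.
def _brute_force_best_area(target: int, limit: int = 100) -> int:
    best_diff, best_area = target, 0
    for a in range(1, limit):
        t_a = a * (a + 1) // 2  # T(a)
        for b in range(1, limit):
            count = t_a * b * (b + 1) // 2  # T(a) * T(b) rectangles in an a x b grid
            if abs(target - count) < best_diff:
                best_diff, best_area = abs(target - count), a * b
    return best_area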
if __name__ == "__main__":
print(f"""{solution() = }""") | 552 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowercase_ = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field ( default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
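# (Comment added: list_field exists so that mutable list defaults can be used safely with
#  dataclasses -- see the chars_to_ignore field of DataTrainingArguments below.)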
@dataclass
class ModelArguments :
    model_name_or_path : str =field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir : Optional[str] =field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor : Optional[bool] =field(
        default=True , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    attention_dropout : Optional[float] =field(
        default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
    activation_dropout : Optional[float] =field(
        default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
    hidden_dropout : Optional[float] =field(
        default=0.1 , metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
        } , )
    feat_proj_dropout : Optional[float] =field(
        default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , )
    mask_time_prob : Optional[float] =field(
        default=0.05 , metadata={
            'help': (
                'Probability of each feature vector along the time axis to be chosen as the start of the vector '
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
            )
        } , )
    layerdrop : Optional[float] =field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class DataTrainingArguments :
    dataset_config_name : Optional[str] =field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name : Optional[str] =field(
        default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    overwrite_cache : bool =field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    preprocessing_num_workers : Optional[int] =field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_train_samples : Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_val_samples : Optional[int] =field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of validation examples to this '
                'value if set.'
            )
        } , )
    chars_to_ignore : List[str] =list_field(
        default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class DataCollatorCTCWithPadding :
    processor : WavaVecaProcessor
    padding : Union[bool, str] =True
    max_length : Optional[int] =None
    max_length_labels : Optional[int] =None
    pad_to_multiple_of : Optional[int] =None
    pad_to_multiple_of_labels : Optional[int] =None
    def __call__( self , features ):
        '''simple docstring'''
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]

        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
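        # (Comment added: Wav2Vec2ForCTC drops label positions < 0 when computing the CTC
        #  loss, so filling padded positions with -100 keeps them out of the loss.)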
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )

        batch['''labels'''] = labels

        return batch
class CTCTrainer ( Trainer ):
    def training_step ( self , model , inputs ):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs )

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()

        return loss.detach()
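    # Note (added): with ctc_loss_reduction == "sum", the summed loss is divided by the
    # number of non-padded label tokens ((labels >= 0).sum()), so the -100 padding inserted
    # by the data collator is excluded from the normalization as well.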
def main ( ):
"""simple docstring"""
lowerCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowerCAmelCase_ = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
lowerCAmelCase_ = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
lowerCAmelCase_ = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_lowercase : List[str] ):
lowerCAmelCase_ = re.sub(_lowercase , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
lowerCAmelCase_ = train_dataset.map(_lowercase , remove_columns=['''sentence'''] )
lowerCAmelCase_ = eval_dataset.map(_lowercase , remove_columns=['''sentence'''] )
def extract_all_chars(_lowercase : str ):
lowerCAmelCase_ = ''' '''.join(batch['''text'''] )
lowerCAmelCase_ = list(set(_lowercase ) )
return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    vocab_test = eval_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )

    vocab_list = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['''|'''] = vocab_dict[''' ''']
    del vocab_dict[" "]
    vocab_dict['''[UNK]'''] = len(vocab_dict )
    vocab_dict['''[PAD]'''] = len(vocab_dict )

    with open('''vocab.json''' , '''w''' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )
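    # For illustration (added; exact ids depend on the dataset): vocab.json maps each
    # character to an integer id, with " " replaced by the word delimiter "|", e.g.
    #   {"a": 0, "b": 1, ..., "|": 27, "[UNK]": 28, "[PAD]": 29}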
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
lowerCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=_lowercase , return_attention_mask=_lowercase )
lowerCAmelCase_ = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
lowerCAmelCase_ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowerCAmelCase_ = min(len(_lowercase ) , data_args.max_train_samples )
lowerCAmelCase_ = train_dataset.select(range(_lowercase ) )
if data_args.max_val_samples is not None:
lowerCAmelCase_ = eval_dataset.select(range(data_args.max_val_samples ) )
lowerCAmelCase_ = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_lowercase : Union[str, Any] ):
lowerCAmelCase_ , lowerCAmelCase_ = torchaudio.load(batch['''path'''] )
lowerCAmelCase_ = resampler(_lowercase ).squeeze().numpy()
lowerCAmelCase_ = 1_6_0_0_0
lowerCAmelCase_ = batch['''text''']
return batch
lowerCAmelCase_ = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase_ = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_lowercase : str ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
lowerCAmelCase_ = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(_lowercase )
return batch
lowerCAmelCase_ = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
lowerCAmelCase_ = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowerCAmelCase_ = datasets.load_metric('''wer''' )
    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
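        # (Comment added: group_tokens=False because the label ids are plain characters
        #  rather than CTC frame outputs; grouping would wrongly merge real double letters.)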
        wer = wer_metric.compute(predictions=pred_str , references=label_str )

        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase_ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase_ = model_args.model_name_or_path
else:
lowerCAmelCase_ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowerCAmelCase_ = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
lowerCAmelCase_ = train_result.metrics
lowerCAmelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCAmelCase_ = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
lowerCAmelCase_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase_ = trainer.evaluate()
lowerCAmelCase_ = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowercase )
lowerCAmelCase_ = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
return results
if __name__ == "__main__":
main() | 552 | 1 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__UpperCamelCase = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def UpperCamelCase_( _A :str )-> Union[str, Any]:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
__UpperCamelCase = parser.parse_args()
if args.check_lib:
__UpperCamelCase = importlib.import_module('transformers')
__UpperCamelCase = Path(transformers_module.__file__).parent
else:
__UpperCamelCase = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 701 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input ( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common ( self ):
        '''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 185 | 0 |
import os
def largest_product(grid ) -> int:
    """simple docstring"""
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product

    return largest


def solution() -> int:
    """simple docstring"""
    grid = []
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )

    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
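# A minimal sanity check (added; uses a hand-made grid instead of grid.txt): the largest
# product of four adjacent numbers in this 4x4 grid is the last column, 4 * 4 * 4 * 4 = 256:
#     largest_product([[1, 1, 1, 4], [1, 1, 1, 4], [1, 1, 1, 4], [1, 1, 1, 4]]) == 256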
if __name__ == "__main__":
print(solution())
| 612 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __A(lowerCAmelCase ) -> Dict:
"""simple docstring"""
_UpperCamelCase = os.path.join(args.tf_model_dir , """parameters.json""" )
_UpperCamelCase = json.loads(open(lowerCAmelCase ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(""".pt""" ):
_UpperCamelCase = args.output + """.pt"""
_UpperCamelCase = OrderedDict()
with tf.device("""/CPU:0""" ):
_UpperCamelCase = tf.train.load_checkpoint(args.tf_model_dir )
_UpperCamelCase = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_UpperCamelCase = reader.get_tensor(lowerCAmelCase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
_UpperCamelCase = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
_UpperCamelCase = 8
_UpperCamelCase = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/moe""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_UpperCamelCase = key_name[-9:-7]
for i in range(1_6 ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
_UpperCamelCase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/mlp""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p1/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/ln""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/att""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_UpperCamelCase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_UpperCamelCase = state[:, 0, :, :]
_UpperCamelCase = state[:, 1, :, :]
_UpperCamelCase = state[:, 2, :, :]
_UpperCamelCase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/o/kernel""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
_UpperCamelCase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/an""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_UpperCamelCase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
_UpperCamelCase = """model.%s.weight""" % nlayer
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
if key_name.startswith("""model/wte""" ):
_UpperCamelCase = """lm_head.weight"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/wob""" ):
_UpperCamelCase = """final_logits_bias"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = state.reshape((1, -1) )
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense/kernel":
_UpperCamelCase = """model.last_project.weight"""
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense_1/bias":
_UpperCamelCase = """model.last_project.bias"""
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
torch.save(lowerCAmelCase , args.output )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowerCamelCase__ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 612 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the SQuAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n    >>> squad_metric = datasets.load_metric(\"squad\")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute ( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 1 |
def sum_digits( num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution( max_n = 100 ):
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator )
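# Context (added): the loop above builds the numerators of the convergents of the
# continued fraction e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (Project Euler 65); the term
# 2 * i // 3 produces the 2, 4, 6, ... entries that appear at every third position.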
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def a_ ( *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Tuple):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class a__ ( unittest.TestCase ):
@require_torch
    def test_small_model_pt ( self ):
        """simple docstring"""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image , candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output) , [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ] , )

        output = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ] , )
@require_tf
    def test_small_model_tf ( self ):
        """simple docstring"""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image , candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )

        output = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ] , )
@slow
@require_torch
    def test_large_model_pt ( self ):
        """simple docstring"""
        image_classifier = pipeline(
            task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image , candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
        output = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf ( self ):
        """simple docstring"""
        image_classifier = pipeline(
            task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image , candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
        output = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 77 | """simple docstring"""
def stooge_sort(arr ):
    stooge(arr , 0 , len(arr ) - 1 )
    return arr


def stooge(arr , i , h ):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )

        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )

        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , (h) )

        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
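# Note (added): stooge sort is intentionally inefficient -- its running time satisfies
# T(n) = 3 * T(2n / 3) + O(1), i.e. O(n^(log 3 / log 1.5)) ~= O(n^2.71).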
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 425 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger ( ):
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
'''simple docstring'''
    def __init__( self , lock_file ):
        self.lock_file = lock_file
        return None

    def __str__( self ):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy :
'''simple docstring'''
    def __init__( self , lock ):
        self.lock = lock
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock :
'''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
@property
def UpperCamelCase ( self ):
return self._lock_file
@property
def UpperCamelCase ( self ):
return self._timeout
@timeout.setter
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :Optional[Any] = float(UpperCamelCase_ )
return None
def UpperCamelCase ( self ):
raise NotImplementedError()
def UpperCamelCase ( self ):
raise NotImplementedError()
@property
def UpperCamelCase ( self ):
return self._lock_file_fd is not None
def UpperCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase_ :str = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase_ :Optional[Any] = id(self )
lowercase_ :str = self._lock_file
lowercase_ :int = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(UpperCamelCase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase_ :Optional[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase ( self , UpperCamelCase_=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase_ :List[Any] = id(self )
lowercase_ :str = self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
lowercase_ :Any = 0
logger().debug(f"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
self.release()
return None
def __del__( self ):
self.release(force=UpperCamelCase_ )
return None
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[int] = os.path.basename(UpperCamelCase_ )
if len(UpperCamelCase_ ) > max_length and max_length > 0:
lowercase_ :Union[str, Any] = os.path.dirname(UpperCamelCase_ )
lowercase_ :List[str] = str(hash(UpperCamelCase_ ) )
lowercase_ :Dict = filename[: max_length - len(UpperCamelCase_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(UpperCamelCase_ , UpperCamelCase_ )
else:
return path
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=-1 , UpperCamelCase_=None ):
from .file_utils import relative_to_absolute_path
super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ )
lowercase_ :Dict = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def UpperCamelCase ( self ):
lowercase_ :int = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowercase_ :str = os.open(self._lock_file , UpperCamelCase_ )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCamelCase_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(UpperCamelCase_ )
else:
lowercase_ :int = fd
return None
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = self._lock_file_fd
lowercase_ :List[str] = None
msvcrt.locking(UpperCamelCase_ , msvcrt.LK_UNLCK , 1 )
os.close(UpperCamelCase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=-1 , UpperCamelCase_=None ):
lowercase_ :Dict = os.statvfs(os.path.dirname(UpperCamelCase_ ) ).f_namemax
super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowercase_ :Tuple = os.open(self._lock_file , UpperCamelCase_ )
try:
fcntl.flock(UpperCamelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCamelCase_ )
else:
lowercase_ :Tuple = fd
return None
def UpperCamelCase ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowercase_ :List[str] = self._lock_file_fd
lowercase_ :Optional[Any] = None
fcntl.flock(UpperCamelCase_ , fcntl.LOCK_UN )
os.close(UpperCamelCase_ )
return None
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def UpperCamelCase ( self ):
lowercase_ :List[str] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowercase_ :Tuple = os.open(self._lock_file , UpperCamelCase_ )
except OSError:
pass
else:
lowercase_ :List[str] = fd
return None
def UpperCamelCase ( self ):
os.close(self._lock_file_fd )
lowercase_ :Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE : List[Any] = None
if msvcrt:
SCREAMING_SNAKE_CASE : Union[str, Any] = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE : Tuple = UnixFileLock
else:
SCREAMING_SNAKE_CASE : Optional[int] = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 704 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=4 , ):
lowercase_ :Union[str, Any] = parent
lowercase_ :int = batch_size
lowercase_ :int = seq_length
lowercase_ :str = is_training
lowercase_ :Dict = use_attention_mask
lowercase_ :List[Any] = use_token_type_ids
lowercase_ :str = use_labels
lowercase_ :str = vocab_size
lowercase_ :Optional[int] = hidden_size
lowercase_ :Dict = num_hidden_layers
lowercase_ :List[str] = num_attention_heads
lowercase_ :int = intermediate_size
lowercase_ :Union[str, Any] = hidden_act
lowercase_ :Optional[int] = hidden_dropout_prob
lowercase_ :Tuple = attention_probs_dropout_prob
lowercase_ :int = max_position_embeddings
lowercase_ :List[Any] = type_vocab_size
lowercase_ :Any = type_sequence_label_size
lowercase_ :Tuple = initializer_range
lowercase_ :Any = num_choices
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[str] = None
if self.use_attention_mask:
lowercase_ :Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ :List[Any] = None
if self.use_token_type_ids:
lowercase_ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ :Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ :List[str] = config_and_inputs
lowercase_ :Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase ( self ):
lowercase_ :Dict = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Dict = config_and_inputs
lowercase_ :str = True
lowercase_ :Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : int =True
lowercase : Dict =(
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = FlaxBertModelTester(self )
@slow
def UpperCamelCase ( self ):
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
lowercase_ :Dict = FlaxBertModel.from_pretrained('''bert-base-cased''' )
lowercase_ :str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
| 441 | 0 |
'''simple docstring'''
import operator as op
def a__ ( lowercase : Dict ) -> Any:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = lambda lowercase, lowercase : int(x / y ) # noqa: E731 integer division operation
_UpperCamelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ), '''Action'''.center(12 ), '''Stack''', sep=''' | ''' )
print('''-''' * (30 + len(lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ('''push(''' + x + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
else:
_UpperCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + b + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
_UpperCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + a + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
stack.append(
str(opr[x](int(lowercase ), int(lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ('''push(''' + a + x + b + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''', )
return int(stack[0] )
if __name__ == "__main__":
lowercase__ : Dict = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 98 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A ( __lowercase ):
__a = """Salesforce/blip-image-captioning-base"""
__a = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
__a = """image_captioner"""
__a = AutoModelForVisionaSeq
__a = ["""image"""]
__a = ["""text"""]
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(self , ["""vision"""] )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.pre_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.model.generate(**_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0].strip() | 518 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
lowercase__ = """CIDAS/clipseg-rd64-refined"""
lowercase__ = """image_segmenter"""
lowercase__ = CLIPSegForImageSegmentation
lowercase__ = ["""image""", """text"""]
lowercase__ = ["""image"""]
def __init__( self : Optional[Any], *lowerCamelCase : Tuple, **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self, ['''vision'''] )
super().__init__(*lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : "Image", lowerCamelCase : str ):
'''simple docstring'''
return self.pre_processor(text=[label], images=[image], padding=lowerCamelCase, return_tensors='''pt''' )
def lowercase__ ( self : Any, lowerCamelCase : Optional[int] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = self.model(**lowerCamelCase ).logits
return logits
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = outputs.cpu().detach().numpy()
lowercase__ = 0
lowercase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase =logging.get_logger(__name__)
def __a ( A , A , A ) -> List[str]:
'''simple docstring'''
A__ = os.path.abspath(A )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
A__ = tf.train.list_variables(A )
A__ = []
A__ = []
A__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
A__ = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
A__ = name[1:]
# figure out how many levels deep the name is
A__ = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(A )
# read data
A__ = tf.train.load_variable(A , A )
names.append("/".join(A ) )
arrays.append(A )
logger.info(f"""Read a total of {len(A ):,} layers""" )
# Sanity check
if len(set(A ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(A ) )})""" )
A__ = list(set(A ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(A , A ):
A__ = full_name.split("/" )
A__ = model
A__ = []
for i, m_name in enumerate(A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
A__ = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
A__ = getattr(A , "embeddings" )
A__ = getattr(A , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
A__ = getattr(A , "encoder" )
A__ = getattr(A , "layer" )
A__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
A__ = getattr(A , "pooler" )
A__ = getattr(A , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
A__ = getattr(A , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
A__ = getattr(A , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
A__ = getattr(A , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
A__ = getattr(A , "token_type_embeddings" )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append("weight" )
A__ = getattr(A , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
A__ = getattr(A , "attention" )
A__ = getattr(A , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
A__ = getattr(A , "attention" )
A__ = getattr(A , "output" )
A__ = getattr(A , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
A__ = getattr(A , "attention" )
A__ = getattr(A , "output" )
A__ = getattr(A , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
A__ = getattr(A , "output" )
A__ = getattr(A , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
A__ = getattr(A , "output" )
A__ = getattr(A , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
A__ = getattr(A , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
A__ = getattr(A , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
A__ = getattr(A , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
A__ = getattr(A , "intermediate" )
A__ = getattr(A , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
A__ = getattr(A , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
A__ = getattr(A , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
A__ = getattr(A , "weight" )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
A__ = ".".join(A )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , A ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , A ):
A__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
A__ = array.transpose()
if pointer.shape == array.shape:
A__ = torch.from_numpy(A )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def __a ( A , A , A ) -> Any:
'''simple docstring'''
logger.info(f"""Loading model based on config from {config_path}...""" )
A__ = BertConfig.from_json_file(A )
A__ = BertModel(A )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(A , A , A )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
__UpperCAmelCase =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 337 |
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = data
A__ = previous
A__ = next_node
def __str__( self ):
'''simple docstring'''
return f"""{self.data}"""
def lowercase_ ( self ):
'''simple docstring'''
return self.data
def lowercase_ ( self ):
'''simple docstring'''
return self.next
def lowercase_ ( self ):
'''simple docstring'''
return self.previous
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = head
def __iter__( self ):
'''simple docstring'''
return self
def lowercase_ ( self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
A__ = self.current.get_data()
A__ = self.current.get_next()
return value
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = None # First node in list
A__ = None # Last node in list
def __str__( self ):
'''simple docstring'''
A__ = self.head
A__ = []
while current is not None:
nodes.append(current.get_data() )
A__ = current.get_next()
return " ".join(str(UpperCamelCase__ ) for node in nodes )
def __contains__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while current:
if current.get_data() == value:
return True
A__ = current.get_next()
return False
def __iter__( self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def lowercase_ ( self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowercase_ ( self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
A__ = node
A__ = node
else:
self.insert_before_node(self.head , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = Node(UpperCamelCase__ )
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.set_tail(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.previous
if node.get_previous() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.next
if node.get_next() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = 1
A__ = Node(UpperCamelCase__ )
A__ = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCamelCase__ , UpperCamelCase__ )
return
current_position += 1
A__ = node.next
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while node:
if node.get_data() == item:
return node
A__ = node.get_next()
raise Exception("Node not found" )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if (node := self.get_node(UpperCamelCase__ )) is not None:
if node == self.head:
A__ = self.head.get_next()
if node == self.tail:
A__ = self.tail.get_previous()
self.remove_node_pointers(UpperCamelCase__ )
@staticmethod
def lowercase_ ( UpperCamelCase__ ):
'''simple docstring'''
if node.get_next():
A__ = node.previous
if node.get_previous():
A__ = node.next
A__ = None
A__ = None
def lowercase_ ( self ):
'''simple docstring'''
return self.head is None
def __a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 337 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _a ( A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_snake_case , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(_snake_case , "num_encoder_blocks" ) )
class _a :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=64 , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[16, 32, 64, 128] , _snake_case=[1, 4, 8, 16] , _snake_case=[1, 2, 4, 8] , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=3 , _snake_case=None , ):
_UpperCAmelCase =parent
_UpperCAmelCase =batch_size
_UpperCAmelCase =image_size
_UpperCAmelCase =num_channels
_UpperCAmelCase =num_encoder_blocks
_UpperCAmelCase =sr_ratios
_UpperCAmelCase =depths
_UpperCAmelCase =hidden_sizes
_UpperCAmelCase =downsampling_rates
_UpperCAmelCase =num_attention_heads
_UpperCAmelCase =is_training
_UpperCAmelCase =use_labels
_UpperCAmelCase =hidden_act
_UpperCAmelCase =hidden_dropout_prob
_UpperCAmelCase =attention_probs_dropout_prob
_UpperCAmelCase =initializer_range
_UpperCAmelCase =num_labels
_UpperCAmelCase =scope
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase =None
if self.use_labels:
_UpperCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =SegformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case )
_UpperCAmelCase =_UpperCAmelCase =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =self.num_labels
_UpperCAmelCase =SegformerForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_UpperCAmelCase =model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =1
_UpperCAmelCase =SegformerForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case )
_UpperCAmelCase =model(_snake_case , labels=_snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =config_and_inputs
_UpperCAmelCase ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case =(
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case =True
snake_case =False
snake_case =False
snake_case =False
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =SegformerModelTester(self )
_UpperCAmelCase =SegformerConfigTester(self , config_class=_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip("SegFormer does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase =model_class(_snake_case )
_UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase =[*signature.parameters.keys()]
_UpperCAmelCase =["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase =True
for model_class in self.all_model_classes:
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =True
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase =model(**self._prepare_for_class(_snake_case , _snake_case ) )
_UpperCAmelCase =outputs.attentions
_UpperCAmelCase =sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase =True
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase =model(**self._prepare_for_class(_snake_case , _snake_case ) )
_UpperCAmelCase =outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
_UpperCAmelCase =(self.model_tester.image_size // 4) ** 2
_UpperCAmelCase =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase =(self.model_tester.image_size // 32) ** 2
_UpperCAmelCase =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_UpperCAmelCase =len(_snake_case )
# Check attention is always last and order is fine
_UpperCAmelCase =True
_UpperCAmelCase =True
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase =model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
_UpperCAmelCase =outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
_UpperCAmelCase =(self.model_tester.image_size // 4) ** 2
_UpperCAmelCase =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def SCREAMING_SNAKE_CASE ( self ):
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase =model(**self._prepare_for_class(_snake_case , _snake_case ) )
_UpperCAmelCase =outputs.hidden_states
_UpperCAmelCase =self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase =True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase =True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
_UpperCAmelCase =model_class(_snake_case )
model.to(_snake_case )
model.train()
_UpperCAmelCase =self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
_UpperCAmelCase =model(**_snake_case ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase =SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowerCamelCase__ ( ):
_UpperCAmelCase =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self ):
# only resize + normalize
_UpperCAmelCase =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
_UpperCAmelCase =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_snake_case )
_UpperCAmelCase =prepare_img()
_UpperCAmelCase =image_processor(images=_snake_case , return_tensors="pt" )
_UpperCAmelCase =encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
_UpperCAmelCase =model(_snake_case )
_UpperCAmelCase =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _snake_case )
_UpperCAmelCase =torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# only resize + normalize
_UpperCAmelCase =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
_UpperCAmelCase =SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_snake_case )
_UpperCAmelCase =prepare_img()
_UpperCAmelCase =image_processor(images=_snake_case , return_tensors="pt" )
_UpperCAmelCase =encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
_UpperCAmelCase =model(_snake_case )
_UpperCAmelCase =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _snake_case )
_UpperCAmelCase =torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# only resize + normalize
_UpperCAmelCase =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
_UpperCAmelCase =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_snake_case )
_UpperCAmelCase =prepare_img()
_UpperCAmelCase =image_processor(images=_snake_case , return_tensors="pt" )
_UpperCAmelCase =encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
_UpperCAmelCase =model(_snake_case )
_UpperCAmelCase =outputs.logits.detach().cpu()
_UpperCAmelCase =image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(500, 300)] )
_UpperCAmelCase =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _snake_case )
_UpperCAmelCase =image_processor.post_process_semantic_segmentation(outputs=_snake_case )
_UpperCAmelCase =torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _snake_case )
| 700 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_UpperCAmelCase =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_UpperCAmelCase =tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_UpperCAmelCase =tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_UpperCAmelCase =output[output != -float("inf" )]
_UpperCAmelCase =tf.cast(
tf.where(tf.not_equal(_snake_case , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-1_2 )
tf.debugging.assert_equal(_snake_case , _snake_case )
@require_tf
class _a ( unittest.TestCase , A__ ):
"""simple docstring"""
if is_tf_available():
snake_case ={
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =2
_UpperCAmelCase =2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self , _snake_case ):
super(_snake_case , self ).__init__()
_UpperCAmelCase =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=_snake_case , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase =[[2, 0], [102, 103]]
_UpperCAmelCase =[[1, 0], [1, 1]]
_UpperCAmelCase =DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} )
_UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"]
for batch_size in range(1 , len(_snake_case ) + 1 ):
_UpperCAmelCase ={
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
_UpperCAmelCase =serving_func(**_snake_case )["sequences"]
_UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =1
_UpperCAmelCase =2
class _a ( tf.Module ):
"""simple docstring"""
def __init__( self , _snake_case ):
super(_snake_case , self ).__init__()
_UpperCAmelCase =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=_snake_case , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase =[[2], [102, 103]]
_UpperCAmelCase =[[1], [1, 1]]
_UpperCAmelCase =DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} )
_UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"]
for input_row in range(len(_snake_case ) ):
_UpperCAmelCase ={
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
_UpperCAmelCase =serving_func(**_snake_case )["sequences"]
_UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_snake_case )
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ):
super().__init__()
_UpperCAmelCase =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_snake_case , "spiece.model" ) , "rb" ).read() )
_UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def SCREAMING_SNAKE_CASE ( self , _snake_case , *_snake_case , **_snake_case ):
_UpperCAmelCase =self.tokenizer.tokenize(_snake_case )
_UpperCAmelCase , _UpperCAmelCase =text.pad_model_inputs(
_snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_UpperCAmelCase =self.model.generate(input_ids=_snake_case , attention_mask=_snake_case )
return self.tokenizer.detokenize(_snake_case )
_UpperCAmelCase =CompleteSentenceTransformer()
_UpperCAmelCase =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
_UpperCAmelCase =complete_model(_snake_case )
_UpperCAmelCase =tf.keras.Model(_snake_case , _snake_case )
keras_model.save(_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
# Has PT equivalent: this test relies on random sampling
_UpperCAmelCase ={
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
_UpperCAmelCase =14
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase ="Hello, my dog is cute and"
_UpperCAmelCase =tokenizer(_snake_case , return_tensors="tf" )
_UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
_UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_UpperCAmelCase =[638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
_UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE ( self ):
# Has PT equivalent: ample use of framework-specific code
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase ="Hugging Face is a technology company based in New York and Paris."
_UpperCAmelCase =bart_tokenizer(_snake_case , return_tensors="tf" ).input_ids
_UpperCAmelCase =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase =bart_model.generate(_snake_case ).numpy()
class _a ( A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=None , **_snake_case ):
return super().call(_snake_case , **_snake_case )
_UpperCAmelCase =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
_UpperCAmelCase =bart_model.generate(_snake_case , foo="bar" ).numpy()
self.assertTrue(np.array_equal(_snake_case , _snake_case ) )
class _a ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self , _snake_case , **_snake_case ):
return super().call(_snake_case , **_snake_case )
_UpperCAmelCase =FakeEncoder(bart_model.config , bart_model.model.shared )
_UpperCAmelCase =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_UpperCAmelCase =bart_model.generate(_snake_case ).numpy()
with self.assertRaises(_snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_snake_case , foo="bar" )
| 592 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = str(A )
return len(A ) == 9 and set(A ) == set('123456789' )
def UpperCAmelCase ( ):
'''simple docstring'''
for base_num in range(9999 , 4999 , -1 ):
_UpperCAmelCase = 10_0002 * base_num
if is_9_pandigital(A ):
return candidate
for base_num in range(333 , 99 , -1 ):
_UpperCAmelCase = 100_2003 * base_num
if is_9_pandigital(A ):
return candidate
return None
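
# Hedged helper (not in the original): the generic "concatenated product" that the two
# hard-coded factors above (100002 and 1002003) compress into a single multiplication.
def concatenated_product(base: int, n: int) -> int:
    """int(str(base) + str(2 * base) + ... + str(n * base)); e.g. concatenated_product(9327, 2) == 932718654."""
    return int("".join(str(base * k) for k in range(1, n + 1)))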
if __name__ == "__main__":
print(F'''{solution() = }''')
| 573 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
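
# Hedged usage sketch mirroring the tester defaults above (not part of the test file):
#
#   import numpy as np
#   from transformers import VivitImageProcessor
#   processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   video = [np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8) for _ in range(10)]  # 10 channel-first frames
#   pixel_values = processor(video, return_tensors="pt").pixel_values
#   # pixel_values.shape == (1, 10, 3, 18, 18): (batch, frames, channels, height, width)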
| 573 | 1 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is closed in the right order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
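
# Quick sanity checks (hedged additions, not in the original module):
#   is_balanced("([]{})")  -> True
#   is_balanced("([)]")    -> False  (crossed pair)
#   is_balanced("((")      -> False  (unclosed brackets)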
| 707 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with a Vigenère cipher using ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with a Vigenère cipher using ``key``."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching key letter; non-letters pass through."""
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
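
# Hedged round-trip example (not part of the original module):
#   encrypt_message("LEMON", "Attack at dawn!")  -> "Lxfopv ef rnhr!"
#   decrypt_message("LEMON", "Lxfopv ef rnhr!")  -> "Attack at dawn!"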
| 232 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
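
# Hedged end-to-end sketch of what the encoders above are for (class and checkpoint names
# follow the public transformers API as used in the slow test):
#
#   from transformers import (DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer,
#                             TFDPRContextEncoder, TFDPRQuestionEncoder)
#   q_enc = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   c_enc = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#   q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#   q = q_enc(**q_tok("who wrote hamlet?", return_tensors="tf")).pooler_output   # (1, 768)
#   c = c_enc(**c_tok("Hamlet is a tragedy by Shakespeare.", return_tensors="tf")).pooler_output
#   score = tf.matmul(q, c, transpose_b=True)  # retrieval score = dot product of the embeddings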
| 124 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
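
# Hedged sketch of what the slow test above exercises: with do_sample=False, generate()
# is greedy, and this checkpoint keeps repeating the prompt "the president".
#
#   import torch
#   from transformers import XLMWithLMHeadModel
#   model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
#   input_ids = torch.tensor([[14, 447]])            # "the president"
#   output_ids = model.generate(input_ids, do_sample=False)
#   # -> [14, 447, 14, 447, ...] as asserted above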
| 124 | 1 |
"""Greedy best-first search on a small obstacle grid."""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """A grid cell together with its heuristic cost toward the goal."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal -- a natural fit for the 4-connected `delta` moves."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
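
# Hedged variant (not in the original): a Euclidean drop-in for Node.calculate_heuristic.
# The Manhattan distance above matches the 4-connected moves in `delta`; straight-line
# distance only re-orders the open list, so the path found may differ.
def euclidean_heuristic(node: Node) -> float:
    return ((node.pos_x - node.goal_x) ** 2 + (node.pos_y - node.goal_y) ** 2) ** 0.5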
class GreedyBestFirst:
    """Greedy best-first search: always expand the open node with the lowest heuristic cost."""

    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # (goal_x, goal_y) order matches Node.__init__ (was swapped in the original)
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        """Walk the parent chain back to the start and reverse it."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem) | 702 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 344 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
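
# Worked example (hedged, with typical DPT settings): a 480x640 (H x W) input targeted at
# 384 with keep_aspect_ratio=True and multiple=32 keeps the scale closer to 1 (fit height,
# scale 0.8) and snaps both sides to multiples of 32:
#   get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32)
#   -> (384, 512)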
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor: optional resize (aspect-ratio preserving if requested,
    with both sides snapped to a multiple of `ensure_multiple_of`), rescaling and normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 451 | '''simple docstring'''
import math
def proth(number: int) -> int:
    """
    Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks of doubling length: 2**(block + 1) + previous entries.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 451 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped by do_convert_rgb)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
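
# Hedged note on the "+ 2" in expected_hidden_dim above (assumption from the Pix2Struct
# flattened-patch layout): each patch is stored as [row_id, col_id, *patch_pixels], so its
# length is patch_height * patch_width * num_channels + 2 -- with the tester defaults,
# 16 * 16 * 3 + 2 = 770.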
| 715 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
__snake_case :str = [ZEROa, ZEROa]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__a = parameterized.to_safe_name('''_'''.join(str(_UpperCAmelCase ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fpaa: bool = True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fpaa:
            args.extend(['--fp16'])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
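    # For reference, a fully expanded launch command assembled from the pieces above looks
    # roughly like this (stage, model and directories are illustrative placeholders):
    #
    #   deepspeed --num_nodes 1 --num_gpus 2 \
    #       <examples_dir>/research_projects/wav2vec2/run_asr.py \
    #       --model_name_or_path <model> ... --fp16 \
    #       --deepspeed <test_dir>/ds_config_wav2vec2_<stage>.json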
| 60 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION ="""\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION ="""\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION ="""
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=5_00, featurize_model_name="gpt2-large", device_id=-1, max_text_length=10_24, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
return out | 337 |
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
A__ = data
A__ = previous
A__ = next_node
def __str__( self ):
'''simple docstring'''
return f"""{self.data}"""
def lowercase_ ( self ):
'''simple docstring'''
return self.data
def lowercase_ ( self ):
'''simple docstring'''
return self.next
def lowercase_ ( self ):
'''simple docstring'''
return self.previous
class lowerCAmelCase__ :
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = head
def __iter__( self ):
'''simple docstring'''
return self
def lowercase_ ( self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
A__ = self.current.get_data()
A__ = self.current.get_next()
return value
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = None # First node in list
A__ = None # Last node in list
def __str__( self ):
'''simple docstring'''
A__ = self.head
A__ = []
while current is not None:
nodes.append(current.get_data() )
A__ = current.get_next()
return " ".join(str(UpperCamelCase__ ) for node in nodes )
def __contains__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while current:
if current.get_data() == value:
return True
A__ = current.get_next()
return False
def __iter__( self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def lowercase_ ( self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowercase_ ( self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
A__ = node
A__ = node
else:
self.insert_before_node(self.head , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = Node(UpperCamelCase__ )
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.set_tail(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.previous
if node.get_previous() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = node
A__ = node.next
if node.get_next() is None:
A__ = node_to_insert
else:
A__ = node_to_insert
A__ = node_to_insert
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = 1
A__ = Node(UpperCamelCase__ )
A__ = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCamelCase__ , UpperCamelCase__ )
return
current_position += 1
A__ = node.next
self.insert_after_node(self.tail , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.head
while node:
if node.get_data() == item:
return node
A__ = node.get_next()
raise Exception("Node not found" )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if (node := self.get_node(UpperCamelCase__ )) is not None:
if node == self.head:
A__ = self.head.get_next()
if node == self.tail:
A__ = self.tail.get_previous()
self.remove_node_pointers(UpperCamelCase__ )
@staticmethod
def lowercase_ ( UpperCamelCase__ ):
'''simple docstring'''
if node.get_next():
A__ = node.previous
if node.get_previous():
A__ = node.next
A__ = None
A__ = None
def lowercase_ ( self ):
'''simple docstring'''
return self.head is None
def __a ( ) -> None:
'''simple docstring'''
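# A short usage sketch of the list above (method names as reconstructed here):
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   print(linked_list)            # 1 2 3
#   linked_list.insert_at_position(position=2, value=9)
#   print(linked_list)            # 1 9 2 3
#   linked_list.delete_value(9)
#   print(2 in linked_list)       # True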
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 337 | 1 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
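# A toy run of the decoder above, using the classic healthy/fever HMM (numbers illustrative):
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "fever"]
#   start_p = {"healthy": 0.6, "fever": 0.4}
#   trans_p = {
#       "healthy": {"healthy": 0.7, "fever": 0.3},
#       "fever": {"healthy": 0.4, "fever": 0.6},
#   }
#   emit_p = {
#       "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   # -> ['healthy', 'healthy', 'fever']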
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 708 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
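    # Note on the argument handling above: parse_known_args() splits argv into the flags this
    # wrapper defines (`args`) and everything else (`unknown`); the unknown part is forwarded
    # verbatim to the example script, e.g.
    #
    #   python <this_script>.py --instance V100:1 pytorch/text-generation/run_generation.py --length 20
    #
    # leaves unknown == ["--length", "20"], which is appended to the remote `python` command.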
| 462 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ (a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = StableDiffusionInstructPixaPixPipeline
a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self :Dict ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
snake_case_ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ : Optional[Any] = CLIPTextModel(lowerCAmelCase__ )
snake_case_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case_ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple=0 ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
snake_case_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" )
if str(lowerCAmelCase__ ).startswith("mps" ):
snake_case_ : List[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
snake_case_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
snake_case_ : str = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[int] = self.get_dummy_components()
snake_case_ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
snake_case_ : Dict = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : Optional[int] = sd_pipe(**lowerCAmelCase__ ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : List[str] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : List[str] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
snake_case_ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : List[Any] = "french fries"
snake_case_ : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
snake_case_ : Dict = output.images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : Optional[Any] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self :Tuple ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : Any = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
snake_case_ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : Optional[Any] = [inputs["prompt"]] * 2
snake_case_ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case_ : Optional[int] = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
snake_case_ : int = image / 2 + 0.5
snake_case_ : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case_ : int = image.repeat(2 , 1 , 1 , 1 )
snake_case_ : List[str] = sd_pipe(**lowerCAmelCase__ ).images
snake_case_ : Optional[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case_ : Tuple = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[int] = self.get_dummy_components()
snake_case_ : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case_ : Any = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
snake_case_ : Union[str, Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
snake_case_ : List[str] = sd_pipe(**lowerCAmelCase__ ).images
snake_case_ : str = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
assert image.shape == (1, 32, 32, 3)
snake_case_ : Any = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _A ( self :Tuple ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : str = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
snake_case_ : Tuple = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
snake_case_ : List[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type="pt" ) )[0]
snake_case_ : Dict = components["vae"]
snake_case_ : Any = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case_ : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case_ : Union[str, Any] = pipe(**lowerCAmelCase__ )[0]
snake_case_ : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :int ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :Optional[Any] , lowerCAmelCase__ :List[str]=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = torch.manual_seed(lowerCAmelCase__ )
snake_case_ : Dict = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case_ : List[str] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
snake_case_ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Optional[Any] = self.get_inputs()
snake_case_ : Tuple = pipe(**lowerCAmelCase__ ).images
snake_case_ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case_ : Union[str, Any] = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCAmelCase__ )
snake_case_ : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Any = self.get_inputs()
snake_case_ : Optional[Any] = pipe(**lowerCAmelCase__ ).images
snake_case_ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case_ : Dict = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _A ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCAmelCase__ )
snake_case_ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Any = self.get_inputs()
snake_case_ : int = pipe(**lowerCAmelCase__ ).images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case_ : List[str] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case_ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case_ : Optional[int] = latents[0, -3:, -3:, -1]
snake_case_ : List[str] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case_ : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case_ : List[str] = latents[0, -3:, -3:, -1]
snake_case_ : Dict = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
snake_case_ : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
snake_case_ : Tuple = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _A ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
snake_case_ : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : List[str] = self.get_inputs()
snake_case_ : Optional[int] = pipe(**lowerCAmelCase__ )
snake_case_ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : Any = inputs["image"].resize((504, 504) )
snake_case_ : str = "timbrooks/instruct-pix2pix"
snake_case_ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : List[str] = pipe(**lowerCAmelCase__ )
snake_case_ : str = output.images[0]
snake_case_ : Union[str, Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case_ : Optional[Any] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
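# A minimal standalone inference sketch for the pipeline exercised above (checkpoint id and
# image URL are taken from the slow tests; the call pattern follows the usual diffusers convention):
#
#   pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", safety_checker=None
#   ).to("cuda")
#   image = load_image(
#       "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#   )
#   edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10).images[0]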
| 653 |
""" Utils to train DistilBERT, adapted in part from the Facebook, Inc. XLM model (https://github.com/facebookresearch/XLM) """
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = F'''--- Global rank: {params.global_rank} - '''
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.
    """
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
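# For the multi-GPU branch of the init function above, the launcher is expected to export the
# environment variables read there; a single-node, 2-process run would roughly set
# (values illustrative):
#
#   WORLD_SIZE=2  N_GPU_NODE=2  N_NODES=1  NODE_RANK=0  RANK=<0 or 1>
#
# with params.local_rank set per process before the init function is called.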
| 653 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
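    # Example invocation (script name and paths are placeholders):
    #
    #   python convert_mobilebert_checkpoint.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./pytorch_model.bin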
| 701 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class A ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
@classmethod
    def setUpClass(cls):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=__UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(__UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
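# The three tests above are equivalent to running, from a shell:
#
#   accelerate launch [--multi_gpu] <scripts/test_cli.py>
#   accelerate launch --config_file tests/test_configs/<config>.yaml <scripts/test_cli.py>
#   accelerate test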
class A ( unittest.TestCase ):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=__UpperCAmelCase )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __UpperCAmelCase , )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=__UpperCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __UpperCAmelCase , )
| 559 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
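# Illustration of one rename performed by the function above (diffusers key -> SD key;
# derived from the mapping tables, not from a real checkpoint):
#
#   "down_blocks.0.resnets.0.norm1.weight" -> "input_blocks.1.0.in_layers.0.weight"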
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
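# The original SD checkpoint stores the VAE attention projections as 1x1 convolutions, so the
# 2-D linear weights coming from diffusers gain two trailing singleton dims, e.g.:
#
#   w.shape == (512, 512)  ->  reshape_weight_for_sd(w).shape == (512, 512, 1, 1)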
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
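    # Example invocation (script name and paths are placeholders):
    #
    #   python convert_diffusers_to_sd.py \
    #       --model_path ./my-diffusers-pipeline --checkpoint_path ./model.ckpt --half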
| 19 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A_ = None
A_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.pa_type
def _lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = np.array(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowerCAmelCase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCAmelCase_ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _lowercase ( self : Dict , lowerCAmelCase_ : dict , lowerCAmelCase_ : Any=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = PIL.Image.open(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE_ = path.split('''::''' )[-1]
try:
SCREAMING_SNAKE_CASE_ = string_to_dict(lowerCAmelCase_ , config.HUB_DATASETS_URL )['''repo_id''']
SCREAMING_SNAKE_CASE_ = token_per_repo_id.get(lowerCAmelCase_ )
except ValueError:
SCREAMING_SNAKE_CASE_ = None
with xopen(lowerCAmelCase_ , '''rb''' , use_auth_token=lowerCAmelCase_ ) as f:
SCREAMING_SNAKE_CASE_ = BytesIO(f.read() )
SCREAMING_SNAKE_CASE_ = PIL.Image.open(bytes_ )
else:
SCREAMING_SNAKE_CASE_ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
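    # Decoding note (descriptive only): for non-local paths the code above strips any `::` chaining
    # used by streaming URLs, tries to recover the Hub repo_id so the matching auth token can be
    # used, and falls back to an anonymous download when the URL does not match the Hub pattern.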
def _lowercase ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def _lowercase ( self : List[Any] , lowerCAmelCase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(lowerCAmelCase_ ) , type=pa.binary() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
SCREAMING_SNAKE_CASE_ = storage.field('''bytes''' )
else:
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(lowerCAmelCase_ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
SCREAMING_SNAKE_CASE_ = storage.field('''path''' )
else:
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array(
[encode_np_array(np.array(lowerCAmelCase_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase_ , self.pa_type )
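    # Illustrative summary (not in the original source): the cast above accepts four Arrow layouts --
    # plain string paths, raw binary bytes, {bytes, path} structs, and lists of pixel values -- and
    # normalizes each into the canonical struct<bytes: binary, path: string> image storage type.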
def _lowercase ( self : Dict , lowerCAmelCase_ : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase_ : List[str] ):
with xopen(lowerCAmelCase_ , '''rb''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
return bytes_
SCREAMING_SNAKE_CASE_ = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ = pa.array(
[os.path.basename(lowerCAmelCase_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase_ , self.pa_type )
def UpperCAmelCase ( )-> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
SCREAMING_SNAKE_CASE_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
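# Hedged example: the resulting set is whatever the installed Pillow build can both open and save,
# typically something like ['BMP', 'GIF', 'JPEG', 'PNG', 'TIFF', ...] -- the exact contents depend
# on Pillow's version and optional codecs.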
def UpperCAmelCase ( UpperCAmelCase )-> bytes:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE_ = image.format
else:
SCREAMING_SNAKE_CASE_ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(UpperCAmelCase ,format=UpperCAmelCase )
return buffer.getvalue()
def UpperCAmelCase ( UpperCAmelCase )-> dict:
'''simple docstring'''
if hasattr(UpperCAmelCase ,'''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase )}
def UpperCAmelCase ( UpperCAmelCase )-> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
SCREAMING_SNAKE_CASE_ = array.dtype
SCREAMING_SNAKE_CASE_ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
SCREAMING_SNAKE_CASE_ = dtype.kind
SCREAMING_SNAKE_CASE_ = dtype.itemsize
SCREAMING_SNAKE_CASE_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
SCREAMING_SNAKE_CASE_ = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
SCREAMING_SNAKE_CASE_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
SCREAMING_SNAKE_CASE_ = dtype_byteorder + dtype_kind + str(UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = np.dtype(UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
SCREAMING_SNAKE_CASE_ = PIL.Image.fromarray(array.astype(UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase )}
def UpperCAmelCase ( UpperCAmelCase )-> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = first_non_null_value(UpperCAmelCase )
if isinstance(UpperCAmelCase ,UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCAmelCase ,np.ndarray ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(UpperCAmelCase )
return [obj_to_image_dict_func(UpperCAmelCase ) for obj in objs]
elif isinstance(UpperCAmelCase ,PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(UpperCAmelCase )
return [obj_to_image_dict_func(UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
| 393 | 0 |
from math import factorial
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if successes > trials:
        raise ValueError('successes must be lower than or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
        raise ValueError('prob has to be in the range (0, 1)' )
_snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : List[Any] = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
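# Worked example (computed by hand, matching the demo below): for 2 successes in 4 trials with
# prob=0.75, probability = 0.75**2 * 0.25**2 = 0.03515625, the binomial coefficient is
# 4! / (2! * 2!) = 6, and the result is 6 * 0.03515625 = 0.2109375.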
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase_ : Dict = '''bert-base-cased'''
lowercase_ : Any = '''google/pegasus-xsum'''
lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowercase_ : Any = '''patrickvonplaten/t5-tiny-random'''
lowercase_ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase_ : Dict = '''sshleifer/tiny-mbart'''
lowercase_ : str = '''sshleifer/tiny-marian-en-de'''
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : str = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.source''' ) , __lowerCAmelCase )
_dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.target''' ) , __lowerCAmelCase )
return tmp_dir
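# Fixture note (descriptive only): the helper above writes six files into tmp_dir --
# {train,val,test}.source holding ARTICLES and {train,val,test}.target holding SUMMARIES --
# which is the directory layout the seq2seq datasets under test expect.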
class lowercase ( a_ ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
_snake_case : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
_snake_case : List[str] = 64
_snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
_snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
_snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : List[Any] = []
_snake_case : List[Any] = []
for batch in data_loader:
_snake_case : Any = batch['input_ids'].shape
_snake_case : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_snake_case : int = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase_ )
assert num_src_per_batch[0] == max(lowerCamelCase_ )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
_snake_case : Optional[Any] = 2
_snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
_snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
_snake_case : Tuple = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
_snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
_snake_case : List[Any] = max_len * 2 * 64
if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
else:
_snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_snake_case : List[Any] = max_len * 4
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : str = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Any = self._get_dataset()
_snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
_snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
assert idsa.intersection(lowerCamelCase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
if tok_name == MBART_TINY:
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_snake_case : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_snake_case : Tuple = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_snake_case : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCamelCase_ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCamelCase_ = TaTokenizerFast
UpperCamelCase_ = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCamelCase_ = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
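# Usage note (hedged, not from the original file): with this _LazyModule setup, a statement like
# `from transformers.models.mt5 import MT5ForConditionalGeneration` defers the heavy torch-backed
# import until the attribute is first accessed, keeping `import transformers` itself cheap.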
| 625 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( A , A , A , A=1024 ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ =[], []
UpperCAmelCase__ =list(zip(A , A ) )
UpperCAmelCase__ , UpperCAmelCase__ =sorted_examples[0]
def is_too_big(A ):
return tok(A , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCAmelCase__ =new_src + " " + src
UpperCAmelCase__ =new_tgt + " " + tgt
if is_too_big(A ) or is_too_big(A ): # cant fit, finalize example
finished_src.append(A )
finished_tgt.append(A )
UpperCAmelCase__ , UpperCAmelCase__ =src, tgt
else: # can fit, keep adding
UpperCAmelCase__ , UpperCAmelCase__ =cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(A )
finished_tgt.append(A )
return finished_src, finished_tgt
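# Hedged example of the packing above: given the sources [" Sam ate lunch today.", "Sams lunch
# ingredients."] and a generous max_tokens, the two examples would be merged into the single packed
# source " Sam ate lunch today. Sams lunch ingredients." (targets likewise), shrinking the example
# count while losing no text.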
def _UpperCAmelCase ( A , A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =Path(A )
save_path.mkdir(exist_ok=A )
for split in ["train"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ =[x.rstrip() for x in Path(A ).open().readlines()]
UpperCAmelCase__ , UpperCAmelCase__ =pack_examples(A , A , A , A )
print(F"""packed {split} split from {len(A )} examples -> {len(A )}.""" )
Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(A ) )
Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(A ) )
for split in ["val", "test"]:
UpperCAmelCase__ , UpperCAmelCase__ =data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(A , save_path / F"""{split}.source""" )
shutil.copyfile(A , save_path / F"""{split}.target""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=A , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=A , default=128 )
parser.add_argument("--data_dir" , type=A )
parser.add_argument("--save_path" , type=A )
UpperCAmelCase__ =parser.parse_args()
UpperCAmelCase__ =AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(A , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 625 | 1 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
if index == number_of_items:
return 0
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : str = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ : List[str] = values[index] + knapsack(
A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 )
return max(A__ ,A__ )
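# Worked example (values assumed for illustration): with weights=[1, 2, 4, 5], values=[5, 4, 8, 6]
# and max_weight=5, the skip/take recursion returns 13 by taking the items of weight 1 and 4
# (values 5 + 8), the best combination that fits the capacity.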
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCAmelCase_ : Tuple = tf.convert_to_tensor(
        [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
UpperCAmelCase_ : Any = model(lowerCAmelCase_ )["last_hidden_state"]
UpperCAmelCase_ : str = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 463 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : List[str] = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
def __snake_case ( _UpperCamelCase ) -> list[int]:
if length <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(_UpperCamelCase )]
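# Hedged example: a length of 5 should yield [0, 1, 6, 15, 28], i.e. n * (2n - 1) for n = 0..4
# (note the sequence starts at n = 0, so 0 is included as the first element).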
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 487 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCamelCase ( lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
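# Quick sanity example: is_prime(25) reaches the 6k +/- 1 loop with i = 5, finds 25 % 5 == 0 and
# returns False, while is_prime(29) exhausts the loop and returns True.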
def _lowerCamelCase ( lowercase : int ) -> list[int]:
_a = str(lowercase )
_a = [n]
for i in range(1 , len(lowercase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
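# Trace for illustration: list_truncated_nums(3797) returns [3797, 797, 379, 97, 37, 7, 3] --
# every right and left truncation of 3797, the classic two-sided truncatable prime, all of which
# are themselves prime.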
def _lowerCamelCase ( lowercase : int ) -> bool:
if len(str(lowercase ) ) > 3:
if not is_prime(int(str(lowercase )[-3:] ) ) or not is_prime(int(str(lowercase )[:3] ) ):
return False
return True
def _lowerCamelCase ( lowercase : int = 11 ) -> list[int]:
_a = []
_a = 13
while len(lowercase ) != count:
if validate(lowercase ):
_a = list_truncated_nums(lowercase )
if all(is_prime(lowercase ) for i in list_nums ):
list_truncated_primes.append(lowercase )
num += 2
return list_truncated_primes
def _lowerCamelCase ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
| 521 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='CLIPImageProcessor'
__a =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : str , __a : List[Any]=None , __a : Any=None , **__a : int ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : List[Any] , __a : Optional[int]=None , __a : Dict=None , __a : List[Any]=None , **__a : Dict ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_a = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
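    # Behavior note (descriptive, not in the original): when both text and images are passed, the
    # image tensor is attached to the tokenizer output as `pixel_values`, so callers receive one
    # BatchEncoding carrying input_ids, attention_mask and pixel_values together.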
def UpperCamelCase__ ( self : Tuple , *__a : List[Any] , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[Any] , *__a : Union[str, Any] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 521 | 1 |